deepagents_sourcecode update
This commit is contained in:
110
context-engineering-more-deep_research_agent/__init__.py
Normal file
110
context-engineering-more-deep_research_agent/__init__.py
Normal file
@@ -0,0 +1,110 @@
|
||||
"""Context Engineering 연구용 DeepAgent 모듈.
|
||||
|
||||
이 모듈은 DeepAgents 라이브러리의 Context Engineering 전략을
|
||||
명시적으로 구현하고 문서화한 연구용 에이전트입니다.
|
||||
|
||||
## Context Engineering 5가지 핵심 전략
|
||||
|
||||
1. **Context Offloading (컨텍스트 오프로딩)**
|
||||
- 대용량 도구 결과를 파일시스템으로 축출
|
||||
- 메시지에는 파일 경로만 남기고 실제 데이터는 외부 저장
|
||||
- FilesystemMiddleware의 tool_token_limit_before_evict 파라미터로 제어
|
||||
- 기본값: 20,000 토큰 초과 시 자동 축출
|
||||
|
||||
2. **Context Reduction (컨텍스트 축소)**
|
||||
- Compaction: 오래된 메시지의 도구 호출/결과 제거
|
||||
- Summarization: 컨텍스트가 임계값 초과 시 대화 요약
|
||||
- SummarizationMiddleware: 85% 컨텍스트 사용 시 트리거
|
||||
- 핵심 정보만 유지하고 세부사항 압축
|
||||
|
||||
3. **Context Retrieval (컨텍스트 검색)**
|
||||
- grep/glob 기반의 단순하고 빠른 검색
|
||||
- 벡터 DB나 복잡한 인덱싱 없이 직접 파일 검색
|
||||
- 필요한 정보만 선택적으로 로드
|
||||
- FilesystemMiddleware의 read_file, grep, glob 도구
|
||||
|
||||
4. **Context Isolation (컨텍스트 격리)**
|
||||
- SubAgent를 통한 독립된 컨텍스트 윈도우
|
||||
- 메인 에이전트와 상태 비공유
|
||||
- 복잡한 하위 작업을 격리된 환경에서 처리
|
||||
- SubAgentMiddleware의 task() 도구
|
||||
|
||||
5. **Context Caching (컨텍스트 캐싱)**
|
||||
- Anthropic Prompt Caching으로 시스템 프롬프트 캐싱
|
||||
- KV Cache 효율화로 비용 절감
|
||||
- AnthropicPromptCachingMiddleware로 구현
|
||||
|
||||
## 모듈 구조
|
||||
|
||||
```
|
||||
context-engineering-more-deep_research_agent/
|
||||
├── __init__.py # 이 파일
|
||||
├── agent.py # 메인 에이전트 (5가지 전략 통합)
|
||||
├── prompts.py # 시스템 프롬프트
|
||||
├── tools.py # 연구 도구
|
||||
├── utils.py # 유틸리티
|
||||
├── backends/ # 백엔드 구현
|
||||
│ ├── __init__.py
|
||||
│ ├── pyodide_sandbox.py # WASM 기반 안전한 Python 실행
|
||||
│ └── docker_shared.py # Docker 공유 작업공간
|
||||
├── context_strategies/ # Context Engineering 전략
|
||||
│ ├── __init__.py
|
||||
│ ├── offloading.py # 1. Context Offloading
|
||||
│ ├── reduction.py # 2. Context Reduction
|
||||
│ ├── retrieval.py # 3. Context Retrieval
|
||||
│ ├── isolation.py # 4. Context Isolation
|
||||
│ └── caching.py # 5. Context Caching
|
||||
├── research/ # 연구 에이전트
|
||||
│ ├── __init__.py
|
||||
│ ├── agent.py
|
||||
│ └── prompts.py
|
||||
└── skills/ # 스킬 미들웨어
|
||||
└── middleware.py
|
||||
```
|
||||
|
||||
## 사용 예시
|
||||
|
||||
```python
|
||||
from context_engineering_research_agent import agent
|
||||
|
||||
# 에이전트 실행
|
||||
result = agent.invoke({
|
||||
"messages": [{"role": "user", "content": "Context Engineering 전략 연구"}]
|
||||
})
|
||||
```
|
||||
|
||||
## 참고 자료
|
||||
|
||||
- DeepAgents 공식 문서: https://docs.langchain.com/oss/python/deepagents/overview
|
||||
- Anthropic Prompt Caching: https://docs.anthropic.com/claude/docs/prompt-caching
|
||||
- LangGraph: https://docs.langchain.com/oss/python/langgraph/overview
|
||||
"""
|
||||
|
||||
# 버전 정보
|
||||
__version__ = "0.1.0"
|
||||
__author__ = "Context Engineering Research Team"
|
||||
|
||||
# 주요 컴포넌트 export
|
||||
from context_engineering_more_deep_research_agent.agent import (
|
||||
agent,
|
||||
create_context_aware_agent,
|
||||
)
|
||||
from context_engineering_more_deep_research_agent.context_strategies import (
|
||||
ContextCachingStrategy,
|
||||
ContextIsolationStrategy,
|
||||
ContextOffloadingStrategy,
|
||||
ContextReductionStrategy,
|
||||
ContextRetrievalStrategy,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# 에이전트
|
||||
"agent",
|
||||
"create_context_aware_agent",
|
||||
# Context Engineering 전략
|
||||
"ContextOffloadingStrategy",
|
||||
"ContextReductionStrategy",
|
||||
"ContextRetrievalStrategy",
|
||||
"ContextIsolationStrategy",
|
||||
"ContextCachingStrategy",
|
||||
]
|
||||
5
deepagents_sourcecode/.gitignore
vendored
5
deepagents_sourcecode/.gitignore
vendored
@@ -213,3 +213,8 @@ __marimo__/
|
||||
.claude
|
||||
|
||||
.idea
|
||||
TEXTUAL_REFACTOR_PLAN.md
|
||||
libs/deepagents-cli/TEXTUAL_PROGRESS.md
|
||||
|
||||
/tmp/
|
||||
*/tmp/
|
||||
@@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 Harrison Chase
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -1,28 +1,30 @@
|
||||
# 🚀🧠 Deep Agents
|
||||
|
||||
Agents can increasingly tackle long-horizon tasks, [with agent task length doubling every 7 months](https://metr.org/blog/2025-03-19-measuring-ai-ability-to-complete-long-tasks/)! But, long horizon tasks often span dozens of tool calls, which present cost and reliability challenges. Popular agents such as [Claude Code](https://code.claude.com/docs) and [Manus](https://www.youtube.com/watch?v=6_BcCthVvb8) use some common principles to address these challenges, including **planning** (prior to task execution), **computer access** (giving the agent access to a shell and a filesystem), and **sub-agent delegation** (isolated task execution). `deepagents` is a simple agent harness that implements these tools, but is open source and easily extendable with your own custom tools and instructions.
|
||||
|
||||
<img src=".github/images/deepagents_banner.png" alt="deep agent" width="100%"/>
|
||||
Agents can increasingly tackle long-horizon tasks, [with agent task length doubling every 7 months](https://metr.org/blog/2025-03-19-measuring-ai-ability-to-complete-long-tasks/)! But, long horizon tasks often span dozens of tool calls, which present cost and reliability challenges. Popular agents such as [Claude Code](https://code.claude.com/docs) and [Manus](https://www.youtube.com/watch?v=6_BcCthVvb8) use some common principles to address these challenges, including **planning** (prior to task execution), **computer access** (giving the agent access to a shell and a filesystem), and **sub-agent delegation** (isolated task execution). `deepagents` is a simple, open-source agent harness that implements these tools and is easily extensible with your own custom tools, instructions, and choice of LLM.
|
||||
|
||||
## 📚 Resources
|
||||
|
||||
- **[Documentation](https://docs.langchain.com/oss/python/deepagents/overview)** - Full overview and API reference
|
||||
- **[Korean Documentation](docs/DeepAgents_Documentation_KR.md)** - DeepAgents Technical Documentation (KR)
|
||||
- **[Quickstarts Repo](https://github.com/langchain-ai/deepagents-quickstarts)** - Examples and use-cases
|
||||
- **[CLI](libs/deepagents-cli/)** - Interactive command-line interface with skills, memory, and HITL workflows
|
||||
|
||||
## 🚀 Quickstart
|
||||
|
||||
You can give `deepagents` custom tools. Below, we'll optionally provide the `tavily` tool to search the web. This tool will be added to the `deepagents` build-in tools (see below).
|
||||
`deepagents` supports custom tools alongside its built-in tools (listed below). In this example, we'll add the optional `tavily` tool for web search.
|
||||
|
||||
```bash
|
||||
pip install deepagents tavily-python
|
||||
|
||||
# using uv
|
||||
uv init
|
||||
uv add deepagents tavily-python
|
||||
```
|
||||
|
||||
Set `TAVILY_API_KEY` in your environment ([get one here](https://www.tavily.com/)):
|
||||
|
||||
```python
|
||||
import os
|
||||
|
||||
from deepagents import create_deep_agent
|
||||
from tavily import TavilyClient
|
||||
|
||||
@@ -40,15 +42,15 @@ agent = create_deep_agent(
|
||||
result = agent.invoke({"messages": [{"role": "user", "content": "What is LangGraph?"}]})
|
||||
```
|
||||
|
||||
The agent created with `create_deep_agent` is compiled [LangGraph StateGraph](https://docs.langchain.com/oss/python/langgraph/overview), so it can be used with streaming, human-in-the-loop, memory, or Studio just like any LangGraph agent. See our [quickstarts repo](https://github.com/langchain-ai/deepagents-quickstarts) for more examples.
|
||||
The agent created with `create_deep_agent` is compiled [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) `StateGraph`, so it can be used with streaming, human-in-the-loop, memory, or Studio just like any LangGraph agent. See our [quickstarts repo](https://github.com/langchain-ai/deepagents-quickstarts) for more examples.
|
||||
|
||||
## Customizing Deep Agents
|
||||
|
||||
There are several parameters you can pass to `create_deep_agent`.
|
||||
There are several parameters you can pass to [`create_deep_agent`](https://reference.langchain.com/python/deepagents/#deepagents.create_deep_agent).
|
||||
|
||||
### `model`
|
||||
|
||||
By default, `deepagents` uses `"claude-sonnet-4-5-20250929"`. You can customize this by passing any [LangChain model object](https://python.langchain.com/docs/integrations/chat/).
|
||||
By default, `deepagents` uses `claude-sonnet-4-5-20250929`. You can customize this by passing any [LangChain model object](https://python.langchain.com/docs/integrations/chat/).
|
||||
|
||||
```python
|
||||
from langchain.chat_models import init_chat_model
|
||||
@@ -80,6 +82,7 @@ When writing a custom system prompt, you should:
|
||||
|
||||
```python
|
||||
from deepagents import create_deep_agent
|
||||
|
||||
research_instructions = """your custom system prompt"""
|
||||
agent = create_deep_agent(
|
||||
system_prompt=research_instructions,
|
||||
@@ -102,7 +105,7 @@ def internet_search(query: str) -> str:
|
||||
agent = create_deep_agent(tools=[internet_search])
|
||||
```
|
||||
|
||||
You can also connect MCP tools via [langchain-mcp-adapters](https://github.com/langchain-ai/langchain-mcp-adapters):
|
||||
You can also connect MCP tools via [`langchain-mcp-adapters`](https://github.com/langchain-ai/langchain-mcp-adapters):
|
||||
|
||||
```python
|
||||
from langchain_mcp_adapters.client import MultiServerMCPClient
|
||||
@@ -216,10 +219,10 @@ agent = create_deep_agent(
|
||||
|
||||
Available backends include:
|
||||
|
||||
- **StateBackend** (default): Ephemeral files stored in agent state
|
||||
- **FilesystemBackend**: Real disk operations under a root directory
|
||||
- **StoreBackend**: Persistent storage using LangGraph Store
|
||||
- **CompositeBackend**: Route different paths to different backends
|
||||
- **`StateBackend`** (default): Ephemeral files stored in agent state
|
||||
- **`FilesystemBackend`**: Real disk operations under a root directory
|
||||
- **`StoreBackend`**: Persistent storage using LangGraph Store
|
||||
- **`CompositeBackend`**: Route different paths to different backends
|
||||
|
||||
See the [backends documentation](https://docs.langchain.com/oss/python/deepagents/backends) for more details.
|
||||
|
||||
@@ -259,16 +262,16 @@ Every deep agent created with `create_deep_agent` comes with a standard set of t
|
||||
|
||||
| Tool Name | Description | Provided By |
|
||||
|-----------|-------------|-------------|
|
||||
| `write_todos` | Create and manage structured task lists for tracking progress through complex workflows | TodoListMiddleware |
|
||||
| `read_todos` | Read the current todo list state | TodoListMiddleware |
|
||||
| `ls` | List all files in a directory (requires absolute path) | FilesystemMiddleware |
|
||||
| `read_file` | Read content from a file with optional pagination (offset/limit parameters) | FilesystemMiddleware |
|
||||
| `write_file` | Create a new file or completely overwrite an existing file | FilesystemMiddleware |
|
||||
| `edit_file` | Perform exact string replacements in files | FilesystemMiddleware |
|
||||
| `glob` | Find files matching a pattern (e.g., `**/*.py`) | FilesystemMiddleware |
|
||||
| `grep` | Search for text patterns within files | FilesystemMiddleware |
|
||||
| `execute`* | Run shell commands in a sandboxed environment | FilesystemMiddleware |
|
||||
| `task` | Delegate tasks to specialized sub-agents with isolated context windows | SubAgentMiddleware |
|
||||
| `write_todos` | Create and manage structured task lists for tracking progress through complex workflows | `TodoListMiddleware` |
|
||||
| `read_todos` | Read the current todo list state | `TodoListMiddleware` |
|
||||
| `ls` | List all files in a directory (requires absolute path) | `FilesystemMiddleware` |
|
||||
| `read_file` | Read content from a file with optional pagination (offset/limit parameters) | `FilesystemMiddleware` |
|
||||
| `write_file` | Create a new file or completely overwrite an existing file | `FilesystemMiddleware` |
|
||||
| `edit_file` | Perform exact string replacements in files | `FilesystemMiddleware` |
|
||||
| `glob` | Find files matching a pattern (e.g., `**/*.py`) | `FilesystemMiddleware` |
|
||||
| `grep` | Search for text patterns within files | `FilesystemMiddleware` |
|
||||
| `execute`* | Run shell commands in a sandboxed environment | `FilesystemMiddleware` |
|
||||
| `task` | Delegate tasks to specialized sub-agents with isolated context windows | `SubAgentMiddleware` |
|
||||
|
||||
The `execute` tool is only available if the backend implements `SandboxBackendProtocol`. By default, it uses the in-memory state backend which does not support command execution. As shown, these tools (along with other capabilities) are provided by default middleware:
|
||||
|
||||
@@ -280,41 +283,41 @@ See the [agent harness documentation](https://docs.langchain.com/oss/python/deep
|
||||
|
||||
| Middleware | Purpose |
|
||||
|------------|---------|
|
||||
| **TodoListMiddleware** | Task planning and progress tracking |
|
||||
| **FilesystemMiddleware** | File operations and context offloading (auto-saves large results) |
|
||||
| **SubAgentMiddleware** | Delegate tasks to isolated sub-agents |
|
||||
| **SummarizationMiddleware** | Auto-summarizes when context exceeds 170k tokens |
|
||||
| **AnthropicPromptCachingMiddleware** | Caches system prompts to reduce costs (Anthropic only) |
|
||||
| **PatchToolCallsMiddleware** | Fixes dangling tool calls from interruptions |
|
||||
| **HumanInTheLoopMiddleware** | Pauses execution for human approval (requires `interrupt_on` config) |
|
||||
| **`TodoListMiddleware`** | Task planning and progress tracking |
|
||||
| **`FilesystemMiddleware`** | File operations and context offloading (auto-saves large results) |
|
||||
| **`SubAgentMiddleware`** | Delegate tasks to isolated sub-agents |
|
||||
| **`SummarizationMiddleware`** | Auto-summarizes when context exceeds 170k tokens |
|
||||
| **`AnthropicPromptCachingMiddleware`** | Caches system prompts to reduce costs (Anthropic only) |
|
||||
| **`PatchToolCallsMiddleware`** | Fixes dangling tool calls from interruptions |
|
||||
| **`HumanInTheLoopMiddleware`** | Pauses execution for human approval (requires `interrupt_on` config) |
|
||||
|
||||
## Built-in prompts
|
||||
|
||||
The middleware automatically adds instructions about the standard tools. Your custom instructions should **complement, not duplicate** these defaults:
|
||||
|
||||
#### From [TodoListMiddleware](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/agents/middleware/todo.py)
|
||||
#### From [`TodoListMiddleware`](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/agents/middleware/todo.py)
|
||||
|
||||
- Explains when to use `write_todos` and `read_todos`
|
||||
- Guidance on marking tasks completed
|
||||
- Best practices for todo list management
|
||||
- When NOT to use todos (simple tasks)
|
||||
|
||||
#### From [FilesystemMiddleware](libs/deepagents/deepagents/middleware/filesystem.py)
|
||||
#### From [`FilesystemMiddleware`](libs/deepagents/deepagents/middleware/filesystem.py)
|
||||
|
||||
- Lists all filesystem tools (`ls`, `read_file`, `write_file`, `edit_file`, `glob`, `grep`, `execute`*)
|
||||
- Explains that file paths must start with `/`
|
||||
- Describes each tool's purpose and parameters
|
||||
- Notes about context offloading for large tool results
|
||||
|
||||
#### From [SubAgentMiddleware](libs/deepagents/deepagents/middleware/subagents.py)
|
||||
#### From [`SubAgentMiddleware`](libs/deepagents/deepagents/middleware/subagents.py)
|
||||
|
||||
- Explains the `task()` tool for delegating to sub-agents
|
||||
- When to use sub-agents vs when NOT to use them
|
||||
- Guidance on parallel execution
|
||||
- Subagent lifecycle (spawn → run → return → reconcile)
|
||||
|
||||
## Security Considerations
|
||||
## Security considerations
|
||||
|
||||
### Trust Model
|
||||
### Trust model
|
||||
|
||||
Deepagents follows a "trust the LLM" model similar to Claude Code. The agent can perform any action the underlying tools allow. Security boundaries should be enforced at the tool/sandbox level, not by expecting the LLM to self-police.
|
||||
|
||||
60
deepagents_sourcecode/examples/ralph_mode/README.md
Normal file
60
deepagents_sourcecode/examples/ralph_mode/README.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# Ralph Mode for DeepAgents
|
||||
|
||||

|
||||
|
||||
## What is Ralph?
|
||||
|
||||
Ralph is an autonomous looping pattern created by [Geoff Huntley](https://ghuntley.com) that went viral in late 2025. The original implementation is literally one line:
|
||||
|
||||
```bash
|
||||
while :; do cat PROMPT.md | agent ; done
|
||||
```
|
||||
|
||||
Each loop starts with **fresh context**—the simplest pattern for context management. No conversation history to manage, no token limits to worry about. Just start fresh every iteration.
|
||||
|
||||
The filesystem and git allow the agent to track progress over time. This serves as its memory and worklog.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Install uv (if you don't have it)
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
|
||||
# Create a virtual environment
|
||||
uv venv
|
||||
source .venv/bin/activate
|
||||
|
||||
# Install the CLI
|
||||
uv pip install deepagents-cli
|
||||
|
||||
# Download the script (or copy from examples/ralph_mode/ if you have the repo)
|
||||
curl -O https://raw.githubusercontent.com/langchain-ai/deepagents/master/examples/ralph_mode/ralph_mode.py
|
||||
|
||||
# Run Ralph
|
||||
python ralph_mode.py "Build a Python programming course for beginners. Use git."
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Unlimited iterations (Ctrl+C to stop)
|
||||
python ralph_mode.py "Build a Python course"
|
||||
|
||||
# With iteration limit
|
||||
python ralph_mode.py "Build a REST API" --iterations 5
|
||||
|
||||
# With specific model
|
||||
python ralph_mode.py "Create a CLI tool" --model claude-haiku-4-5-20251001
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
1. **You provide a task** — declarative, what you want (not how)
|
||||
2. **Agent runs** — creates files, makes progress
|
||||
3. **Loop repeats** — same prompt, but files persist
|
||||
4. **You stop it** — Ctrl+C when satisfied
|
||||
|
||||
## Credits
|
||||
|
||||
- Original Ralph concept by [Geoff Huntley](https://ghuntley.com)
|
||||
- [Brief History of Ralph](https://www.humanlayer.dev/blog/brief-history-of-ralph) by HumanLayer
|
||||
108
deepagents_sourcecode/examples/ralph_mode/ralph_mode.py
Normal file
108
deepagents_sourcecode/examples/ralph_mode/ralph_mode.py
Normal file
@@ -0,0 +1,108 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Ralph Mode - Autonomous looping for DeepAgents
|
||||
|
||||
Ralph is an autonomous looping pattern created by Geoff Huntley.
|
||||
Each loop starts with fresh context. The filesystem and git serve as memory.
|
||||
|
||||
Usage:
|
||||
uv pip install deepagents-cli
|
||||
python ralph_mode.py "Build a Python course. Use git."
|
||||
python ralph_mode.py "Build a REST API" --iterations 5
|
||||
"""
|
||||
import warnings
|
||||
warnings.filterwarnings("ignore", message="Core Pydantic V1 functionality")
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
from deepagents_cli.agent import create_cli_agent
|
||||
from deepagents_cli.config import console, COLORS, SessionState, create_model
|
||||
from deepagents_cli.execution import execute_task
|
||||
from deepagents_cli.ui import TokenTracker
|
||||
|
||||
|
||||
async def ralph(task: str, max_iterations: int = 0, model_name: str | None = None):
    """Run the agent in a Ralph loop (fresh context every iteration).

    Each iteration re-sends the same declarative task with a brand-new
    conversation; persistence between iterations comes from the filesystem,
    which serves as the agent's memory and worklog.

    Args:
        task: Declarative description of what to build (not how).
        max_iterations: Maximum number of iterations; 0 means unlimited
            (stop with Ctrl+C).
        model_name: Optional model identifier forwarded to ``create_model``;
            ``None`` selects the provider default.
    """
    # Scratch directory listed at the end as the agent's worklog.
    # NOTE(review): work_dir is only printed here and never passed to
    # create_cli_agent/execute_task — confirm the agent actually operates
    # inside this directory.
    work_dir = tempfile.mkdtemp(prefix="ralph-")

    model = create_model(model_name)
    agent, backend = create_cli_agent(
        model=model,
        assistant_id="ralph",
        tools=[],
        auto_approve=True,  # Ralph is fully autonomous: no HITL prompts
    )
    session_state = SessionState(auto_approve=True)
    token_tracker = TokenTracker()

    console.print(f"\n[bold {COLORS['primary']}]Ralph Mode[/bold {COLORS['primary']}]")
    console.print(f"[dim]Task: {task}[/dim]")
    console.print(f"[dim]Iterations: {'unlimited (Ctrl+C to stop)' if max_iterations == 0 else max_iterations}[/dim]")
    console.print(f"[dim]Working directory: {work_dir}[/dim]\n")

    iteration = 1
    try:
        while max_iterations == 0 or iteration <= max_iterations:
            console.print(f"\n[bold cyan]{'='*60}[/bold cyan]")
            console.print(f"[bold cyan]RALPH ITERATION {iteration}[/bold cyan]")
            console.print(f"[bold cyan]{'='*60}[/bold cyan]\n")

            iter_display = f"{iteration}/{max_iterations}" if max_iterations > 0 else str(iteration)
            # Identical prompt every loop: the agent rediscovers its progress
            # from the files it left behind, not from chat history.
            prompt = f"""## Iteration {iter_display}

Your previous work is in the filesystem. Check what exists and keep building.

TASK:
{task}

Make progress. You'll be called again."""

            await execute_task(
                prompt,
                agent,
                "ralph",
                session_state,
                token_tracker,
                backend=backend,
            )

            console.print(f"\n[dim]...continuing to iteration {iteration + 1}[/dim]")
            iteration += 1

    except KeyboardInterrupt:
        console.print(f"\n[bold yellow]Stopped after {iteration} iterations[/bold yellow]")

    # Show created files (skip git internals)
    console.print(f"\n[bold]Files created in {work_dir}:[/bold]")
    for f in sorted(Path(work_dir).rglob("*")):
        if f.is_file() and ".git" not in str(f):
            console.print(f"  {f.relative_to(work_dir)}", style="dim")
|
||||
|
||||
def main():
    """Parse command-line arguments and launch the Ralph loop."""
    parser = argparse.ArgumentParser(
        description="Ralph Mode - Autonomous looping for DeepAgents",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python ralph_mode.py "Build a Python course. Use git."
  python ralph_mode.py "Build a REST API" --iterations 5
  python ralph_mode.py "Create a CLI tool" --model claude-haiku-4-5-20251001
    """
    )
    parser.add_argument("task", help="Task to work on (declarative, what you want)")
    parser.add_argument("--iterations", type=int, default=0, help="Max iterations (0 = unlimited, default: unlimited)")
    parser.add_argument("--model", help="Model to use (e.g., claude-haiku-4-5-20251001)")
    cli_args = parser.parse_args()

    try:
        asyncio.run(ralph(cli_args.task, cli_args.iterations, cli_args.model))
    except KeyboardInterrupt:
        # Ctrl+C is the expected way to stop Ralph; exit quietly.
        pass


if __name__ == "__main__":
    main()
|
||||
BIN
deepagents_sourcecode/examples/ralph_mode/ralph_mode_diagram.png
Normal file
BIN
deepagents_sourcecode/examples/ralph_mode/ralph_mode_diagram.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 287 KiB |
@@ -3,10 +3,11 @@
|
||||
The [deepagents](https://github.com/langchain-ai/deepagents) CLI is an open source coding assistant that runs in your terminal, similar to Claude Code.
|
||||
|
||||
**Key Features:**
|
||||
|
||||
- **Built-in Tools**: File operations (read, write, edit, glob, grep), shell commands, web search, and subagent delegation
|
||||
- **Customizable Skills**: Add domain-specific capabilities through a progressive disclosure skill system
|
||||
- **Persistent Memory**: Agent remembers your preferences, coding style, and project context across sessions
|
||||
- **Project-Aware**: Automatically detects project roots and loads project-specific configurations
|
||||
- **Project-Aware**: Automatically detects project roots and loads project-specific configurations
|
||||
|
||||
<img src="cli-banner.jpg" alt="deep agent" width="100%"/>
|
||||
|
||||
@@ -15,11 +16,13 @@ The [deepagents](https://github.com/langchain-ai/deepagents) CLI is an open sour
|
||||
`deepagents-cli` is a Python package that can be installed via pip or uv.
|
||||
|
||||
**Install via pip:**
|
||||
|
||||
```bash
|
||||
pip install deepagents-cli
|
||||
```
|
||||
|
||||
**Or using uv (recommended):**
|
||||
|
||||
```bash
|
||||
# Create a virtual environment
|
||||
uv venv
|
||||
@@ -29,16 +32,19 @@ uv pip install deepagents-cli
|
||||
```
|
||||
|
||||
**Run the agent in your terminal:**
|
||||
|
||||
```bash
|
||||
deepagents
|
||||
```
|
||||
|
||||
**Get help:**
|
||||
|
||||
```bash
|
||||
deepagents help
|
||||
```
|
||||
|
||||
**Common options:**
|
||||
|
||||
```bash
|
||||
# Use a specific agent configuration
|
||||
deepagents --agent mybot
|
||||
@@ -62,11 +68,13 @@ Type naturally as you would in a chat interface. The agent will use its built-in
|
||||
The CLI supports three LLM providers with automatic provider detection based on model name:
|
||||
|
||||
**Supported Providers:**
|
||||
|
||||
- **OpenAI** - Models like `gpt-4o`, `gpt-5-mini`, `o1-preview`, `o3-mini` (default: `gpt-5-mini`)
|
||||
- **Anthropic** - Models like `claude-sonnet-4-5-20250929`, `claude-3-opus-20240229` (default: `claude-sonnet-4-5-20250929`)
|
||||
- **Google** - Models like `gemini-3-pro-preview`, `gemini-1.5-pro` (default: `gemini-3-pro-preview`)
|
||||
- **Google** - Models like `gemini-3-pro-preview`, `gemini-2.5-pro` (default: `gemini-3-pro-preview`)
|
||||
|
||||
**Specify model at startup:**
|
||||
|
||||
```bash
|
||||
# Auto-detects Anthropic from model name pattern
|
||||
deepagents --model claude-sonnet-4-5-20250929
|
||||
@@ -76,11 +84,12 @@ deepagents --model gpt-4o
|
||||
```
|
||||
|
||||
**Or use environment variables:**
|
||||
|
||||
```bash
|
||||
# Set provider-specific model defaults
|
||||
export ANTHROPIC_MODEL="claude-sonnet-4-5-20250929"
|
||||
export OPENAI_MODEL="gpt-4o"
|
||||
export GOOGLE_MODEL="gemini-1.5-pro"
|
||||
export GOOGLE_MODEL="gemini-2.5-pro"
|
||||
|
||||
# Set API keys (required)
|
||||
export ANTHROPIC_API_KEY="your-key"
|
||||
@@ -91,11 +100,12 @@ export GOOGLE_API_KEY="your-key"
|
||||
**Model name conventions:**
|
||||
|
||||
Model names follow each provider's official naming convention:
|
||||
|
||||
- **OpenAI**: See [OpenAI Models Documentation](https://platform.openai.com/docs/models)
|
||||
- **Anthropic**: See [Anthropic Models Documentation](https://docs.anthropic.com/en/docs/about-claude/models)
|
||||
- **Google**: See [Google Gemini Models Documentation](https://ai.google.dev/gemini-api/docs/models/gemini)
|
||||
|
||||
The active model is displayed at startup in the CLI interface.
|
||||
The active model is displayed at startup in the CLI interface.
|
||||
|
||||
## Built-in Tools
|
||||
|
||||
@@ -120,15 +130,17 @@ The agent comes with the following built-in tools (always available without conf
|
||||
> **Human-in-the-Loop (HITL) Approval Required**
|
||||
>
|
||||
> Potentially destructive operations require user approval before execution:
|
||||
>
|
||||
> - **File operations**: `write_file`, `edit_file`
|
||||
> - **Command execution**: `shell`, `execute`
|
||||
> - **External requests**: `web_search`, `fetch_url`
|
||||
> - **Delegation**: `task` (subagents)
|
||||
>
|
||||
> Each operation will prompt for approval showing the action details. Use `--auto-approve` to skip prompts:
|
||||
>
|
||||
> ```bash
|
||||
> deepagents --auto-approve
|
||||
> ```
|
||||
> ```
|
||||
|
||||
## Agent Configuration
|
||||
|
||||
@@ -149,16 +161,19 @@ deepagents create <agent_name>
|
||||
The CLI supports separate LangSmith project configuration for agent tracing vs user code tracing:
|
||||
|
||||
**Agent Tracing** - Traces deepagents operations (tool calls, agent decisions):
|
||||
|
||||
```bash
|
||||
export DEEPAGENTS_LANGSMITH_PROJECT="my-agent-project"
|
||||
```
|
||||
|
||||
**User Code Tracing** - Traces code executed via shell commands:
|
||||
|
||||
```bash
|
||||
export LANGSMITH_PROJECT="my-user-code-project"
|
||||
```
|
||||
|
||||
**Complete Setup Example:**
|
||||
|
||||
```bash
|
||||
# Enable LangSmith tracing
|
||||
export LANGCHAIN_TRACING_V2=true
|
||||
@@ -173,12 +188,14 @@ deepagents
|
||||
```
|
||||
|
||||
When both are configured, the CLI displays:
|
||||
|
||||
```
|
||||
✓ LangSmith tracing enabled: Deepagents → 'agent-traces'
|
||||
User code (shell) → 'user-code-traces'
|
||||
```
|
||||
|
||||
**Why separate projects?**
|
||||
|
||||
- Keep agent operations separate from your application code traces
|
||||
- Easier debugging by isolating agent vs user code behavior
|
||||
- Different retention policies or access controls per project
|
||||
@@ -186,15 +203,15 @@ When both are configured, the CLI displays:
|
||||
**Backwards Compatibility:**
|
||||
If `DEEPAGENTS_LANGSMITH_PROJECT` is not set, both agent and user code trace to the same project specified by `LANGSMITH_PROJECT`.
|
||||
|
||||
## Customization
|
||||
## Customization
|
||||
|
||||
There are two primary ways to customize any agent: **memory** and **skills**.
|
||||
There are two primary ways to customize any agent: **memory** and **skills**.
|
||||
|
||||
Each agent has its own global configuration directory at `~/.deepagents/<agent_name>/`:
|
||||
|
||||
```
|
||||
~/.deepagents/<agent_name>/
|
||||
├── agent.md # Auto-loaded global personality/style
|
||||
├── AGENTS.md # Auto-loaded global personality/style
|
||||
└── skills/ # Auto-loaded agent-specific skills
|
||||
├── web-research/
|
||||
│ └── SKILL.md
|
||||
@@ -208,45 +225,51 @@ Projects can extend the global configuration with project-specific instructions
|
||||
my-project/
|
||||
├── .git/
|
||||
└── .deepagents/
|
||||
├── agent.md # Project-specific instructions
|
||||
├── AGENTS.md # Project-specific instructions
|
||||
└── skills/ # Project-specific skills
|
||||
└── custom-tool/
|
||||
└── SKILL.md
|
||||
```
|
||||
|
||||
The CLI automatically detects project roots (via `.git`) and loads:
|
||||
- Project-specific `agent.md` from `[project-root]/.deepagents/agent.md`
|
||||
|
||||
- Project-specific `AGENTS.md` from `[project-root]/.deepagents/AGENTS.md`
|
||||
- Project-specific skills from `[project-root]/.deepagents/skills/`
|
||||
|
||||
Both global and project configurations are loaded together, allowing you to:
|
||||
- Keep general coding style/preferences in global agent.md
|
||||
- Add project-specific context, conventions, or guidelines in project agent.md
|
||||
|
||||
- Keep general coding style/preferences in global AGENTS.md
|
||||
- Add project-specific context, conventions, or guidelines in project AGENTS.md
|
||||
- Share project-specific skills with your team (committed to version control)
|
||||
- Override global skills with project-specific versions (when skill names match)
|
||||
|
||||
### agent.md files
|
||||
### AGENTS.md files
|
||||
|
||||
`agent.md` files provide persistent memory that is always loaded at session start. Both global and project-level `agent.md` files are loaded together and injected into the system prompt.
|
||||
`AGENTS.md` files provide persistent memory that is always loaded at session start. Both global and project-level `AGENTS.md` files are loaded together and injected into the system prompt.
|
||||
|
||||
**Global `agent.md`** (`~/.deepagents/agent/agent.md`)
|
||||
- Your personality, style, and universal coding preferences
|
||||
- General tone and communication style
|
||||
- Universal coding preferences (formatting, type hints, etc.)
|
||||
- Tool usage patterns that apply everywhere
|
||||
- Workflows and methodologies that don't change per-project
|
||||
**Global `AGENTS.md`** (`~/.deepagents/agent/AGENTS.md`)
|
||||
|
||||
**Project `agent.md`** (`.deepagents/agent.md` in project root)
|
||||
- Project-specific context and conventions
|
||||
- Project architecture and design patterns
|
||||
- Coding conventions specific to this codebase
|
||||
- Testing strategies and deployment processes
|
||||
- Team guidelines and project structure
|
||||
- Your personality, style, and universal coding preferences
|
||||
- General tone and communication style
|
||||
- Universal coding preferences (formatting, type hints, etc.)
|
||||
- Tool usage patterns that apply everywhere
|
||||
- Workflows and methodologies that don't change per-project
|
||||
|
||||
**How it works (AgentMemoryMiddleware):**
|
||||
- Loads both files at startup and injects into system prompt as `<user_memory>` and `<project_memory>`
|
||||
- Appends [memory management instructions](deepagents_cli/agent_memory.py#L44-L158) on when/how to update memory files
|
||||
**Project `AGENTS.md`** (`.deepagents/AGENTS.md` in project root)
|
||||
|
||||
- Project-specific context and conventions
|
||||
- Project architecture and design patterns
|
||||
- Coding conventions specific to this codebase
|
||||
- Testing strategies and deployment processes
|
||||
- Team guidelines and project structure
|
||||
|
||||
**How it works:**
|
||||
|
||||
- Loads memory files at startup and injects into system prompt as `<agent_memory>`
|
||||
- Includes guidelines on when/how to update memory files via `edit_file`
|
||||
|
||||
**When the agent updates memory:**
|
||||
|
||||
- IMMEDIATELY when you describe how it should behave
|
||||
- IMMEDIATELY when you give feedback on its work
|
||||
- When you explicitly ask it to remember something
|
||||
@@ -256,29 +279,33 @@ The agent uses `edit_file` to update memories when learning preferences or recei
|
||||
|
||||
### Project memory files
|
||||
|
||||
Beyond `agent.md`, you can create additional memory files in `.deepagents/` for structured project knowledge. These work similarly to [Anthropic's Memory Tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/memory-tool). The agent receives [detailed instructions](deepagents_cli/agent_memory.py#L123-L158) on when to read and update these files.
|
||||
Beyond `AGENTS.md`, you can create additional memory files in `.deepagents/` for structured project knowledge. These work similarly to [Anthropic's Memory Tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/memory-tool). The agent receives instructions on when to read and update these files.
|
||||
|
||||
**How it works:**
|
||||
|
||||
1. Create markdown files in `[project-root]/.deepagents/` (e.g., `api-design.md`, `architecture.md`, `deployment.md`)
|
||||
2. The agent checks these files when relevant to a task (not auto-loaded into every prompt)
|
||||
3. The agent uses `write_file` or `edit_file` to create/update memory files when learning project patterns
|
||||
|
||||
**Example workflow:**
|
||||
|
||||
```bash
|
||||
# Agent discovers deployment pattern and saves it
|
||||
.deepagents/
|
||||
├── agent.md # Always loaded (personality + conventions)
|
||||
├── AGENTS.md # Always loaded (personality + conventions)
|
||||
├── architecture.md # Loaded on-demand (system design)
|
||||
└── deployment.md # Loaded on-demand (deploy procedures)
|
||||
```
|
||||
|
||||
**When the agent reads memory files:**
|
||||
|
||||
- At the start of new sessions (checks what files exist)
|
||||
- Before answering questions about project-specific topics
|
||||
- When you reference past work or patterns
|
||||
- When performing tasks that match saved knowledge domains
|
||||
|
||||
**Benefits:**
|
||||
|
||||
- **Persistent learning**: Agent remembers project patterns across sessions
|
||||
- **Team collaboration**: Share project knowledge through version control
|
||||
- **Contextual retrieval**: Load only relevant memory when needed (reduces token usage)
|
||||
@@ -298,7 +325,7 @@ mkdir -p ~/.deepagents/agent/skills
|
||||
cp -r examples/skills/web-research ~/.deepagents/agent/skills/
|
||||
```
|
||||
|
||||
To manage skills:
|
||||
To manage skills:
|
||||
|
||||
```bash
|
||||
# List all skills (global + project)
|
||||
@@ -323,8 +350,8 @@ deepagents skills info my-tool --project
|
||||
To use skills (e.g., the langgraph-docs skill), just type a request relevant to a skill and the skill will be used automatically.
|
||||
|
||||
```bash
|
||||
$ deepagents
|
||||
$ "create a agent.py script that implements a LangGraph agent"
|
||||
deepagents
|
||||
"create a agent.py script that implements a LangGraph agent"
|
||||
```
|
||||
|
||||
Skills follow Anthropic's [progressive disclosure pattern](https://www.anthropic.com/engineering/equipping-agents-for-the-real-world-with-agent-skills) - the agent knows skills exist but only reads full instructions when needed.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""CLI를 위한 에이전트 관리 및 생성."""
|
||||
"""Agent management and creation for the CLI."""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
@@ -8,6 +8,7 @@ from deepagents import create_deep_agent
|
||||
from deepagents.backends import CompositeBackend
|
||||
from deepagents.backends.filesystem import FilesystemBackend
|
||||
from deepagents.backends.sandbox import SandboxBackendProtocol
|
||||
from deepagents.middleware import MemoryMiddleware, SkillsMiddleware
|
||||
from langchain.agents.middleware import (
|
||||
InterruptOnConfig,
|
||||
)
|
||||
@@ -15,58 +16,60 @@ from langchain.agents.middleware.types import AgentState
|
||||
from langchain.messages import ToolCall
|
||||
from langchain.tools import BaseTool
|
||||
from langchain_core.language_models import BaseChatModel
|
||||
from langgraph.checkpoint.base import BaseCheckpointSaver
|
||||
from langgraph.checkpoint.memory import InMemorySaver
|
||||
from langgraph.pregel import Pregel
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from deepagents_cli.agent_memory import AgentMemoryMiddleware
|
||||
from deepagents_cli.config import COLORS, config, console, get_default_coding_instructions, settings
|
||||
from deepagents_cli.integrations.sandbox_factory import get_default_working_dir
|
||||
from deepagents_cli.shell import ShellMiddleware
|
||||
from deepagents_cli.skills import SkillsMiddleware
|
||||
|
||||
|
||||
def list_agents() -> None:
|
||||
"""사용 가능한 모든 에이전트를 나열합니다."""
|
||||
"""List all available agents."""
|
||||
agents_dir = settings.user_deepagents_dir
|
||||
|
||||
if not agents_dir.exists() or not any(agents_dir.iterdir()):
|
||||
console.print("[yellow]에이전트를 찾을 수 없습니다.[/yellow]")
|
||||
console.print("[yellow]No agents found.[/yellow]")
|
||||
console.print(
|
||||
"[dim]처음 사용할 때 ~/.deepagents/에 에이전트가 생성됩니다.[/dim]",
|
||||
"[dim]Agents will be created in ~/.deepagents/ when you first use them.[/dim]",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
return
|
||||
|
||||
console.print("\n[bold]사용 가능한 에이전트:[/bold]\n", style=COLORS["primary"])
|
||||
console.print("\n[bold]Available Agents:[/bold]\n", style=COLORS["primary"])
|
||||
|
||||
for agent_path in sorted(agents_dir.iterdir()):
|
||||
if agent_path.is_dir():
|
||||
agent_name = agent_path.name
|
||||
agent_md = agent_path / "agent.md"
|
||||
agent_md = agent_path / "AGENTS.md"
|
||||
|
||||
if agent_md.exists():
|
||||
console.print(f" • [bold]{agent_name}[/bold]", style=COLORS["primary"])
|
||||
console.print(f" {agent_path}", style=COLORS["dim"])
|
||||
else:
|
||||
console.print(f" • [bold]{agent_name}[/bold] [dim](미완성)[/dim]", style=COLORS["tool"])
|
||||
console.print(
|
||||
f" • [bold]{agent_name}[/bold] [dim](incomplete)[/dim]", style=COLORS["tool"]
|
||||
)
|
||||
console.print(f" {agent_path}", style=COLORS["dim"])
|
||||
|
||||
console.print()
|
||||
|
||||
|
||||
def reset_agent(agent_name: str, source_agent: str | None = None) -> None:
|
||||
"""에이전트를 기본값으로 재설정하거나 다른 에이전트로부터 복사합니다."""
|
||||
"""Reset an agent to default or copy from another agent."""
|
||||
agents_dir = settings.user_deepagents_dir
|
||||
agent_dir = agents_dir / agent_name
|
||||
|
||||
if source_agent:
|
||||
source_dir = agents_dir / source_agent
|
||||
source_md = source_dir / "agent.md"
|
||||
source_md = source_dir / "AGENTS.md"
|
||||
|
||||
if not source_md.exists():
|
||||
console.print(
|
||||
f"[bold red]오류:[/bold red] 소스 에이전트 '{source_agent}'를 찾을 수 없거나 agent.md가 없습니다"
|
||||
f"[bold red]Error:[/bold red] Source agent '{source_agent}' not found "
|
||||
"or has no AGENTS.md"
|
||||
)
|
||||
return
|
||||
|
||||
@@ -78,26 +81,26 @@ def reset_agent(agent_name: str, source_agent: str | None = None) -> None:
|
||||
|
||||
if agent_dir.exists():
|
||||
shutil.rmtree(agent_dir)
|
||||
console.print(f"기존 에이전트 디렉터리를 제거했습니다: {agent_dir}", style=COLORS["tool"])
|
||||
console.print(f"Removed existing agent directory: {agent_dir}", style=COLORS["tool"])
|
||||
|
||||
agent_dir.mkdir(parents=True, exist_ok=True)
|
||||
agent_md = agent_dir / "agent.md"
|
||||
agent_md = agent_dir / "AGENTS.md"
|
||||
agent_md.write_text(source_content)
|
||||
|
||||
console.print(f"✓ 에이전트 '{agent_name}'가 {action_desc}(으)로 재설정되었습니다", style=COLORS["primary"])
|
||||
console.print(f"✓ Agent '{agent_name}' reset to {action_desc}", style=COLORS["primary"])
|
||||
console.print(f"Location: {agent_dir}\n", style=COLORS["dim"])
|
||||
|
||||
|
||||
def get_system_prompt(assistant_id: str, sandbox_type: str | None = None) -> str:
|
||||
"""에이전트에 대한 기본 시스템 프롬프트를 가져옵니다.
|
||||
"""Get the base system prompt for the agent.
|
||||
|
||||
Args:
|
||||
assistant_id: 경로 참조를 위한 에이전트 식별자
|
||||
sandbox_type: 샌드박스 공급자 유형("modal", "runloop", "daytona").
|
||||
None인 경우 에이전트는 로컬 모드에서 작동합니다.
|
||||
assistant_id: The agent identifier for path references
|
||||
sandbox_type: Type of sandbox provider ("modal", "runloop", "daytona").
|
||||
If None, agent is operating in local mode.
|
||||
|
||||
Returns:
|
||||
시스템 프롬프트 문자열 (agent.md 내용 제외)
|
||||
The system prompt string (without AGENTS.md content)
|
||||
"""
|
||||
agent_dir_path = f"~/.deepagents/{assistant_id}"
|
||||
|
||||
@@ -108,32 +111,32 @@ def get_system_prompt(assistant_id: str, sandbox_type: str | None = None) -> str
|
||||
|
||||
working_dir_section = f"""### Current Working Directory
|
||||
|
||||
You are working in a **remote Linux sandbox** at `{working_dir}`.
|
||||
You are operating in a **remote Linux sandbox** at `{working_dir}`.
|
||||
|
||||
All code execution and file operations happen in this sandbox environment.
|
||||
|
||||
**IMPORTANT:**
|
||||
- The CLI runs locally on the user's machine, but executes code remotely.
|
||||
- Use `{working_dir}` as your working directory for all operations.
|
||||
**Important:**
|
||||
- The CLI is running locally on the user's machine, but you execute code remotely
|
||||
- Use `{working_dir}` as your working directory for all operations
|
||||
|
||||
"""
|
||||
else:
|
||||
cwd = Path.cwd()
|
||||
working_dir_section = f"""<env>
|
||||
WORKING_DIRECTORY: {cwd}
|
||||
Working directory: {cwd}
|
||||
</env>
|
||||
|
||||
### Current Working Directory
|
||||
|
||||
The filesystem backend is currently operating at: `{cwd}`
|
||||
The filesystem backend is currently operating in: `{cwd}`
|
||||
|
||||
### File System and Paths
|
||||
|
||||
**IMPORTANT - Path Handling:**
|
||||
- All file paths MUST be absolute (e.g. `{cwd}/file.txt`).
|
||||
- Use the WORKING_DIRECTORY from <env> to construct absolute paths.
|
||||
- Example: To create a file in the working directory, use `{cwd}/research_project/file.md`
|
||||
- Do NOT use relative paths - always construct the full absolute path.
|
||||
- All file paths must be absolute paths (e.g., `{cwd}/file.txt`)
|
||||
- Use the working directory from <env> to construct absolute paths
|
||||
- Example: To create a file in your working directory, use `{cwd}/research_project/file.md`
|
||||
- Never use relative paths - always construct full absolute paths
|
||||
|
||||
"""
|
||||
|
||||
@@ -142,92 +145,103 @@ The filesystem backend is currently operating at: `{cwd}`
|
||||
+ f"""### Skills Directory
|
||||
|
||||
Your skills are stored at: `{agent_dir_path}/skills/`
|
||||
Skills may contain scripts or support files. Use the physical filesystem path when running skill scripts with bash:
|
||||
Skills may contain scripts or supporting files. When executing skill scripts with bash, use the real filesystem path:
|
||||
Example: `bash python {agent_dir_path}/skills/web-research/script.py`
|
||||
|
||||
### Human-in-the-Loop Tool Approvals
|
||||
### Human-in-the-Loop Tool Approval
|
||||
|
||||
Some tool calls require user approval before execution. If a tool call is rejected by the user:
|
||||
1. Accept the decision immediately - do NOT try the same command again.
|
||||
2. Explain that you understand the user rejected the operation.
|
||||
3. Propose an alternative or ask for clarification.
|
||||
4. NEVER try to bypass a rejection by retrying the exact same command.
|
||||
Some tool calls require user approval before execution. When a tool call is rejected by the user:
|
||||
1. Accept their decision immediately - do NOT retry the same command
|
||||
2. Explain that you understand they rejected the action
|
||||
3. Suggest an alternative approach or ask for clarification
|
||||
4. Never attempt the exact same rejected command again
|
||||
|
||||
Respect user decisions and work collaboratively.
|
||||
Respect the user's decisions and work with them collaboratively.
|
||||
|
||||
### Web Search Tool Usage
|
||||
|
||||
When using the web_search tool:
|
||||
1. The tool returns search results with titles, URLs, and content snippets.
|
||||
2. You MUST read and process these results, then respond to the user naturally.
|
||||
3. Do NOT show raw JSON or tool results directly to the user.
|
||||
4. Synthesize information from multiple sources into a coherent answer.
|
||||
5. Cite sources by mentioning page titles or URLs when relevant.
|
||||
6. If you don't find what you need in the search, explain what you found and ask clarifying questions.
|
||||
When you use the web_search tool:
|
||||
1. The tool will return search results with titles, URLs, and content excerpts
|
||||
2. You MUST read and process these results, then respond naturally to the user
|
||||
3. NEVER show raw JSON or tool results directly to the user
|
||||
4. Synthesize the information from multiple sources into a coherent answer
|
||||
5. Cite your sources by mentioning page titles or URLs when relevant
|
||||
6. If the search doesn't find what you need, explain what you found and ask clarifying questions
|
||||
|
||||
The user ONLY sees your text response, not the tool results. Always provide a complete, natural language answer after using web_search.
|
||||
The user only sees your text responses - not tool results. Always provide a complete, natural language answer after using web_search.
|
||||
|
||||
### Todo List Management
|
||||
|
||||
When using the write_todos tool:
|
||||
1. Keep the todo list minimal - aim for 3-6 items max.
|
||||
2. Only create todos for complex, multi-step tasks that really need tracking.
|
||||
3. Break down tasks into clear, actionable items without being overly granular.
|
||||
4. For simple tasks (1-2 steps), just do them - don't create a todo.
|
||||
5. When first creating a todo list for a task, ALWAYS ask the user if the plan looks good before starting work.
|
||||
- Create the todos so they render, then ask "Does this plan look good?" or similar.
|
||||
- Wait for the user's response before marking the first todo in_progress.
|
||||
- Adjust the plan if they want changes.
|
||||
6. Update todo status promptly as you complete each item.
|
||||
1. Keep the todo list MINIMAL - aim for 3-6 items maximum
|
||||
2. Only create todos for complex, multi-step tasks that truly need tracking
|
||||
3. Break down work into clear, actionable items without over-fragmenting
|
||||
4. For simple tasks (1-2 steps), just do them directly without creating todos
|
||||
5. When first creating a todo list for a task, ALWAYS ask the user if the plan looks good before starting work
|
||||
- Create the todos, let them render, then ask: "Does this plan look good?" or similar
|
||||
- Wait for the user's response before marking the first todo as in_progress
|
||||
- If they want changes, adjust the plan accordingly
|
||||
6. Update todo status promptly as you complete each item
|
||||
|
||||
The todo list is a planning tool - use it judiciously to avoid overwhelming the user with excessive task tracking."""
|
||||
)
|
||||
|
||||
|
||||
def _format_write_file_description(tool_call: ToolCall, _state: AgentState, _runtime: Runtime) -> str:
|
||||
"""승인 프롬프트를 위한 write_file 도구 호출 포맷."""
|
||||
def _format_write_file_description(
|
||||
tool_call: ToolCall, _state: AgentState, _runtime: Runtime
|
||||
) -> str:
|
||||
"""Format write_file tool call for approval prompt."""
|
||||
args = tool_call["args"]
|
||||
file_path = args.get("file_path", "unknown")
|
||||
content = args.get("content", "")
|
||||
|
||||
action = "덮어쓰기(Overwrite)" if Path(file_path).exists() else "생성(Create)"
|
||||
action = "Overwrite" if Path(file_path).exists() else "Create"
|
||||
line_count = len(content.splitlines())
|
||||
|
||||
return f"파일: {file_path}\n작업: 파일 {action}\n줄 수: {line_count}"
|
||||
return f"File: {file_path}\nAction: {action} file\nLines: {line_count}"
|
||||
|
||||
|
||||
def _format_edit_file_description(tool_call: ToolCall, _state: AgentState, _runtime: Runtime) -> str:
|
||||
"""승인 프롬프트를 위한 edit_file 도구 호출 포맷."""
|
||||
def _format_edit_file_description(
|
||||
tool_call: ToolCall, _state: AgentState, _runtime: Runtime
|
||||
) -> str:
|
||||
"""Format edit_file tool call for approval prompt."""
|
||||
args = tool_call["args"]
|
||||
file_path = args.get("file_path", "unknown")
|
||||
replace_all = bool(args.get("replace_all", False))
|
||||
|
||||
return f"파일: {file_path}\n작업: 텍스트 교체 ({'모든 항목' if replace_all else '단일 항목'})"
|
||||
return (
|
||||
f"File: {file_path}\n"
|
||||
f"Action: Replace text ({'all occurrences' if replace_all else 'single occurrence'})"
|
||||
)
|
||||
|
||||
|
||||
def _format_web_search_description(tool_call: ToolCall, _state: AgentState, _runtime: Runtime) -> str:
|
||||
def _format_web_search_description(
|
||||
tool_call: ToolCall, _state: AgentState, _runtime: Runtime
|
||||
) -> str:
|
||||
"""Format web_search tool call for approval prompt."""
|
||||
args = tool_call["args"]
|
||||
query = args.get("query", "unknown")
|
||||
max_results = args.get("max_results", 5)
|
||||
|
||||
return f"쿼리: {query}\n최대 결과: {max_results}\n\n⚠️ 이 작업은 Tavily API 크레딧을 사용합니다"
|
||||
return f"Query: {query}\nMax results: {max_results}\n\n⚠️ This will use Tavily API credits"
|
||||
|
||||
|
||||
def _format_fetch_url_description(tool_call: ToolCall, _state: AgentState, _runtime: Runtime) -> str:
|
||||
def _format_fetch_url_description(
|
||||
tool_call: ToolCall, _state: AgentState, _runtime: Runtime
|
||||
) -> str:
|
||||
"""Format fetch_url tool call for approval prompt."""
|
||||
args = tool_call["args"]
|
||||
url = args.get("url", "unknown")
|
||||
timeout = args.get("timeout", 30)
|
||||
|
||||
return f"URL: {url}\n시간 제한: {timeout}초\n\n⚠️ 웹 콘텐츠를 가져와 마크다운으로 변환합니다"
|
||||
return f"URL: {url}\nTimeout: {timeout}s\n\n⚠️ Will fetch and convert web content to markdown"
|
||||
|
||||
|
||||
def _format_task_description(tool_call: ToolCall, _state: AgentState, _runtime: Runtime) -> str:
|
||||
"""승인 프롬프트를 위한 task(서브 에이전트) 도구 호출 포맷.
|
||||
"""Format task (subagent) tool call for approval prompt.
|
||||
|
||||
task 도구 서명은: task(description: str, subagent_type: str)
|
||||
description에는 서브 에이전트에게 전송될 모든 지침이 포함됩니다.
|
||||
The task tool signature is: task(description: str, subagent_type: str)
|
||||
The description contains all instructions that will be sent to the subagent.
|
||||
"""
|
||||
args = tool_call["args"]
|
||||
description = args.get("description", "unknown")
|
||||
@@ -239,31 +253,31 @@ def _format_task_description(tool_call: ToolCall, _state: AgentState, _runtime:
|
||||
description_preview = description[:500] + "..."
|
||||
|
||||
return (
|
||||
f"서브 에이전트 유형: {subagent_type}\n\n"
|
||||
f"작업 지침:\n"
|
||||
f"Subagent Type: {subagent_type}\n\n"
|
||||
f"Task Instructions:\n"
|
||||
f"{'─' * 40}\n"
|
||||
f"{description_preview}\n"
|
||||
f"{'─' * 40}\n\n"
|
||||
f"⚠️ 서브 에이전트는 파일 작업 및 셸 명령에 접근할 수 있습니다"
|
||||
f"⚠️ Subagent will have access to file operations and shell commands"
|
||||
)
|
||||
|
||||
|
||||
def _format_shell_description(tool_call: ToolCall, _state: AgentState, _runtime: Runtime) -> str:
|
||||
"""Format shell tool call for approval prompt."""
|
||||
args = tool_call["args"]
|
||||
command = args.get("command", "없음")
|
||||
return f"셸 명령: {command}\n작업 디렉터리: {Path.cwd()}"
|
||||
command = args.get("command", "N/A")
|
||||
return f"Shell Command: {command}\nWorking Directory: {Path.cwd()}"
|
||||
|
||||
|
||||
def _format_execute_description(tool_call: ToolCall, _state: AgentState, _runtime: Runtime) -> str:
|
||||
"""Format execute tool call for approval prompt."""
|
||||
args = tool_call["args"]
|
||||
command = args.get("command", "없음")
|
||||
return f"명령 실행: {command}\n위치: 원격 샌드박스"
|
||||
command = args.get("command", "N/A")
|
||||
return f"Execute Command: {command}\nLocation: Remote Sandbox"
|
||||
|
||||
|
||||
def _add_interrupt_on() -> dict[str, InterruptOnConfig]:
|
||||
"""파괴적인 도구에 대해 히먼-인-더-루프(human-in-the-loop) interrupt_on 설정을 구성합니다."""
|
||||
"""Configure human-in-the-loop interrupt_on settings for destructive tools."""
|
||||
shell_interrupt_config: InterruptOnConfig = {
|
||||
"allowed_decisions": ["approve", "reject"],
|
||||
"description": _format_shell_description,
|
||||
@@ -321,40 +335,42 @@ def create_cli_agent(
|
||||
enable_memory: bool = True,
|
||||
enable_skills: bool = True,
|
||||
enable_shell: bool = True,
|
||||
checkpointer: BaseCheckpointSaver | None = None,
|
||||
) -> tuple[Pregel, CompositeBackend]:
|
||||
"""유연한 옵션으로 CLI 구성 에이전트를 생성합니다.
|
||||
"""Create a CLI-configured agent with flexible options.
|
||||
|
||||
이것은 deepagents CLI 에이전트 생성을 위한 주요 진입점이며,
|
||||
내부적으로 사용되거나 외부 코드(예: 벤치마킹 프레임워크, Harbor)에서 사용할 수 있습니다.
|
||||
This is the main entry point for creating a deepagents CLI agent, usable both
|
||||
internally and from external code (e.g., benchmarking frameworks, Harbor).
|
||||
|
||||
Args:
|
||||
model: 사용할 LLM 모델 (예: "anthropic:claude-sonnet-4-5-20250929")
|
||||
assistant_id: 메모리/상태 저장을 위한 에이전트 식별자
|
||||
tools: 에이전트에 제공할 추가 도구 (기본값: 빈 목록)
|
||||
sandbox: 원격 실행을 위한 선택적 샌드박스 백엔드 (예: ModalBackend).
|
||||
None인 경우 로컬 파일시스템 + 셸을 사용합니다.
|
||||
sandbox_type: 샌드박스 공급자 유형("modal", "runloop", "daytona").
|
||||
시스템 프롬프트 생성에 사용됩니다.
|
||||
system_prompt: 기본 시스템 프롬프트를 재정의합니다. None인 경우
|
||||
sandbox_type 및 assistant_id를 기반으로 생성합니다.
|
||||
auto_approve: True인 경우 사람의 확인 없이 모든 도구 호출을 자동으로 승인합니다.
|
||||
자동화된 워크플로에 유용합니다.
|
||||
enable_memory: 영구 메모리를 위한 AgentMemoryMiddleware 활성화
|
||||
enable_skills: 사용자 정의 에이전트 스킬을 위한 SkillsMiddleware 활성화
|
||||
enable_shell: 로컬 셸 실행을 위한 ShellMiddleware 활성화 (로컬 모드에서만)
|
||||
model: LLM model to use (e.g., "anthropic:claude-sonnet-4-5-20250929")
|
||||
assistant_id: Agent identifier for memory/state storage
|
||||
tools: Additional tools to provide to agent
|
||||
sandbox: Optional sandbox backend for remote execution (e.g., ModalBackend).
|
||||
If None, uses local filesystem + shell.
|
||||
sandbox_type: Type of sandbox provider ("modal", "runloop", "daytona").
|
||||
Used for system prompt generation.
|
||||
system_prompt: Override the default system prompt. If None, generates one
|
||||
based on sandbox_type and assistant_id.
|
||||
auto_approve: If True, automatically approves all tool calls without human
|
||||
confirmation. Useful for automated workflows.
|
||||
enable_memory: Enable MemoryMiddleware for persistent memory
|
||||
enable_skills: Enable SkillsMiddleware for custom agent skills
|
||||
enable_shell: Enable ShellMiddleware for local shell execution (only in local mode)
|
||||
checkpointer: Optional checkpointer for session persistence. If None, uses
|
||||
InMemorySaver (no persistence across CLI invocations).
|
||||
|
||||
Returns:
|
||||
(agent_graph, composite_backend)의 2-튜플
|
||||
- agent_graph: 실행 준비된 구성된 LangGraph Pregel 인스턴스
|
||||
- composite_backend: 파일 작업을 위한 CompositeBackend
|
||||
2-tuple of (agent_graph, backend)
|
||||
- agent_graph: Configured LangGraph Pregel instance ready for execution
|
||||
- composite_backend: CompositeBackend for file operations
|
||||
"""
|
||||
if tools is None:
|
||||
tools = []
|
||||
tools = tools or []
|
||||
|
||||
# Setup agent directory for persistent memory (if enabled)
|
||||
if enable_memory or enable_skills:
|
||||
agent_dir = settings.ensure_agent_dir(assistant_id)
|
||||
agent_md = agent_dir / "agent.md"
|
||||
agent_md = agent_dir / "AGENTS.md"
|
||||
if not agent_md.exists():
|
||||
source_content = get_default_coding_instructions()
|
||||
agent_md.write_text(source_content)
|
||||
@@ -369,27 +385,37 @@ def create_cli_agent(
|
||||
# Build middleware stack based on enabled features
|
||||
agent_middleware = []
|
||||
|
||||
# Add memory middleware
|
||||
if enable_memory:
|
||||
memory_sources = [str(settings.get_user_agent_md_path(assistant_id))]
|
||||
project_agent_md = settings.get_project_agent_md_path()
|
||||
if project_agent_md:
|
||||
memory_sources.append(str(project_agent_md))
|
||||
|
||||
agent_middleware.append(
|
||||
MemoryMiddleware(
|
||||
backend=FilesystemBackend(),
|
||||
sources=memory_sources,
|
||||
)
|
||||
)
|
||||
|
||||
# Add skills middleware
|
||||
if enable_skills:
|
||||
sources = [str(skills_dir)]
|
||||
if project_skills_dir:
|
||||
sources.append(str(project_skills_dir))
|
||||
|
||||
agent_middleware.append(
|
||||
SkillsMiddleware(
|
||||
backend=FilesystemBackend(),
|
||||
sources=sources,
|
||||
)
|
||||
)
|
||||
|
||||
# CONDITIONAL SETUP: Local vs Remote Sandbox
|
||||
if sandbox is None:
|
||||
# ========== LOCAL MODE ==========
|
||||
composite_backend = CompositeBackend(
|
||||
default=FilesystemBackend(), # Current working directory
|
||||
routes={}, # No virtualization - use real paths
|
||||
)
|
||||
|
||||
# Add memory middleware
|
||||
if enable_memory:
|
||||
agent_middleware.append(AgentMemoryMiddleware(settings=settings, assistant_id=assistant_id))
|
||||
|
||||
# Add skills middleware
|
||||
if enable_skills:
|
||||
agent_middleware.append(
|
||||
SkillsMiddleware(
|
||||
skills_dir=skills_dir,
|
||||
assistant_id=assistant_id,
|
||||
project_skills_dir=project_skills_dir,
|
||||
)
|
||||
)
|
||||
backend = FilesystemBackend() # Current working directory
|
||||
|
||||
# Add shell middleware (only in local mode)
|
||||
if enable_shell:
|
||||
@@ -407,25 +433,7 @@ def create_cli_agent(
|
||||
)
|
||||
else:
|
||||
# ========== REMOTE SANDBOX MODE ==========
|
||||
composite_backend = CompositeBackend(
|
||||
default=sandbox, # Remote sandbox (ModalBackend, etc.)
|
||||
routes={}, # No virtualization
|
||||
)
|
||||
|
||||
# Add memory middleware
|
||||
if enable_memory:
|
||||
agent_middleware.append(AgentMemoryMiddleware(settings=settings, assistant_id=assistant_id))
|
||||
|
||||
# Add skills middleware
|
||||
if enable_skills:
|
||||
agent_middleware.append(
|
||||
SkillsMiddleware(
|
||||
skills_dir=skills_dir,
|
||||
assistant_id=assistant_id,
|
||||
project_skills_dir=project_skills_dir,
|
||||
)
|
||||
)
|
||||
|
||||
backend = sandbox # Remote sandbox (ModalBackend, etc.)
|
||||
# Note: Shell middleware not used in sandbox mode
|
||||
# File operations and execute tool are provided by the sandbox backend
|
||||
|
||||
@@ -441,7 +449,14 @@ def create_cli_agent(
|
||||
# Full HITL for destructive operations
|
||||
interrupt_on = _add_interrupt_on()
|
||||
|
||||
composite_backend = CompositeBackend(
|
||||
default=backend,
|
||||
routes={},
|
||||
)
|
||||
|
||||
# Create the agent
|
||||
# Use provided checkpointer or fallback to InMemorySaver
|
||||
final_checkpointer = checkpointer if checkpointer is not None else InMemorySaver()
|
||||
agent = create_deep_agent(
|
||||
model=model,
|
||||
system_prompt=system_prompt,
|
||||
@@ -449,6 +464,6 @@ def create_cli_agent(
|
||||
backend=composite_backend,
|
||||
middleware=agent_middleware,
|
||||
interrupt_on=interrupt_on,
|
||||
checkpointer=InMemorySaver(),
|
||||
checkpointer=final_checkpointer,
|
||||
).with_config(config)
|
||||
return agent, composite_backend
|
||||
|
||||
@@ -1,328 +0,0 @@
|
||||
"""에이전트별 장기 메모리를 시스템 프롬프트에 로드하기 위한 미들웨어."""
|
||||
|
||||
import contextlib
|
||||
from collections.abc import Awaitable, Callable
|
||||
from typing import NotRequired, TypedDict, cast
|
||||
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
AgentState,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
)
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from deepagents_cli.config import Settings
|
||||
|
||||
|
||||
class AgentMemoryState(AgentState):
|
||||
"""에이전트 메모리 미들웨어를 위한 상태."""
|
||||
|
||||
user_memory: NotRequired[str]
|
||||
"""~/.deepagents/{agent}/의 개인 설정 (모든 곳에 적용됨)."""
|
||||
|
||||
project_memory: NotRequired[str]
|
||||
"""프로젝트별 컨텍스트 (프로젝트 루트에서 로드됨)."""
|
||||
|
||||
|
||||
class AgentMemoryStateUpdate(TypedDict):
|
||||
"""에이전트 메모리 미들웨어에 대한 상태 업데이트."""
|
||||
|
||||
user_memory: NotRequired[str]
|
||||
"""~/.deepagents/{agent}/의 개인 설정 (모든 곳에 적용됨)."""
|
||||
|
||||
project_memory: NotRequired[str]
|
||||
"""프로젝트별 컨텍스트 (프로젝트 루트에서 로드됨)."""
|
||||
|
||||
|
||||
# Long-term Memory Documentation
|
||||
# Note: Claude Code loads CLAUDE.md files hierarchically and combines them (not precedence-based):
|
||||
# - Loads recursively from cwd up to (but not including) root directory
|
||||
# - Multiple files are combined hierarchically: enterprise → project → user
|
||||
# - Both [project-root]/CLAUDE.md and [project-root]/.claude/CLAUDE.md are loaded if both exist
|
||||
# - Files higher in hierarchy load first, providing foundation for more specific memories
|
||||
# We will follow that pattern for deepagents-cli
|
||||
LONGTERM_MEMORY_SYSTEM_PROMPT = """
|
||||
|
||||
## Long-term Memory
|
||||
|
||||
Long-term memory is stored in files on the filesystem and persists across sessions.
|
||||
|
||||
**User Memory Location**: `{agent_dir_absolute}` (display: `{agent_dir_display}`)
|
||||
**Project Memory Location**: {project_memory_info}
|
||||
|
||||
The system prompt is loaded from two sources at startup:
|
||||
1. **User agent.md**: `{agent_dir_absolute}/agent.md` - personal settings that apply everywhere
|
||||
2. **Project agent.md**: loaded from the project root if available - project-specific instructions
|
||||
|
||||
Project-specific agent.md files are loaded from the following locations (combined if both exist):
|
||||
- `[project-root]/.deepagents/agent.md` (preferred)
|
||||
- `[project-root]/agent.md` (fallback, included if both exist)
|
||||
|
||||
**When you should check/read memory (IMPORTANT - do this first):**
|
||||
- **At the start of every new session**: Check both user and project memory
|
||||
- User: `ls {agent_dir_absolute}`
|
||||
- Project: `ls {project_deepagents_dir}` (if inside a project)
|
||||
- **Before answering a question**: If asked "What do you know about X?" or "How do I do Y?", check project memory first, then user.
|
||||
- **When the user asks you to do a task**: Check for project-specific guides or examples.
|
||||
- **When the user refers to past work**: Search project memory files for relevant context.
|
||||
|
||||
**Memory-First Response Pattern:**
|
||||
1. User asks question -> Check project directory first: `ls {project_deepagents_dir}`
|
||||
2. If relevant files exist -> Read them: `read_file '{project_deepagents_dir}/[filename]'`
|
||||
3. If needed, check user memory -> `ls {agent_dir_absolute}`
|
||||
4. Answer by supplementing general knowledge with stored knowledge.
|
||||
|
||||
**When you should update memory:**
|
||||
- **Immediately when the user describes your role or how you should behave**
|
||||
- **Immediately when the user gives you feedback** - record what went wrong and how to do better in memory.
|
||||
- When the user explicitly asks you to remember something.
|
||||
- When patterns or preferences emerge (coding style, conventions, workflow).
|
||||
- After a significant task where the context would be helpful for future sessions.
|
||||
|
||||
**Learning from Feedback:**
|
||||
- When the user tells you something is better or worse, figure out why and encode it as a pattern.
|
||||
- Every correction is an opportunity to improve permanently - don't just fix the immediate issue, update your instructions.
|
||||
- If the user says "You should remember X" or "Pay attention to Y", treat this as highest priority and update memory immediately.
|
||||
- Look for the underlying principles behind corrections, not just the specific mistakes.
|
||||
|
||||
## Deciding Where to Store Memory
|
||||
|
||||
When writing or updating agent memory, decide where each fact, configuration, or behavior belongs:
|
||||
|
||||
### User Agent File: `{agent_dir_absolute}/agent.md`
|
||||
-> Describes the agent's **personality, style, and universal behaviors** across all projects.
|
||||
|
||||
**Store here:**
|
||||
- General tone and communication style
|
||||
- Universal coding preferences (formatting, commenting style, etc.)
|
||||
- General workflows and methodologies to follow
|
||||
- Tool usage patterns that apply everywhere
|
||||
- Personal preferences that don't change between projects
|
||||
|
||||
**Examples:**
|
||||
- "Be concise and direct in your answers"
|
||||
- "Always use type hints in Python"
|
||||
- "Prefer functional programming patterns"
|
||||
|
||||
### Project Agent File: `{project_deepagents_dir}/agent.md`
|
||||
-> Describes **how this specific project works** and **how the agent should behave here only**.
|
||||
|
||||
**Store here:**
|
||||
- Project-specific architecture and design patterns
|
||||
- Coding conventions specific to this codebase
|
||||
- Project structure and organization
|
||||
- Testing strategies for this project
|
||||
- Deployment processes and workflows
|
||||
- Team conventions and guidelines
|
||||
|
||||
**Examples:**
|
||||
- "This project uses FastAPI with SQLAlchemy"
|
||||
- "Tests are located in tests/ directory mirroring src structure"
|
||||
- "All API changes require updating OpenAPI specs"
|
||||
|
||||
### Project Memory Files: `{project_deepagents_dir}/*.md`
|
||||
-> Use for **project-specific reference information** and structured notes.
|
||||
|
||||
**Store here:**
|
||||
- API design documentation
|
||||
- Architecture decisions and reasoning
|
||||
- Deployment procedures
|
||||
- Common debugging patterns
|
||||
- Onboarding information
|
||||
|
||||
**Examples:**
|
||||
- `{project_deepagents_dir}/api-design.md` - REST API patterns used
|
||||
- `{project_deepagents_dir}/architecture.md` - System architecture overview
|
||||
- `{project_deepagents_dir}/deployment.md` - How to deploy this project
|
||||
|
||||
### File Operations:
|
||||
|
||||
**User Memory:**
|
||||
```
|
||||
ls {agent_dir_absolute} # List user memory files
|
||||
read_file '{agent_dir_absolute}/agent.md' # Read user preferences
|
||||
edit_file '{agent_dir_absolute}/agent.md' ... # Update user preferences
|
||||
```
|
||||
|
||||
**Project Memory (Preferred for project-specific info):**
|
||||
```
|
||||
ls {project_deepagents_dir} # List project memory files
|
||||
read_file '{project_deepagents_dir}/agent.md' # Read project guidelines
|
||||
edit_file '{project_deepagents_dir}/agent.md' ... # Update project guidelines
|
||||
write_file '{project_deepagents_dir}/agent.md' ... # Create project memory file
|
||||
```
|
||||
|
||||
**IMPORTANT**:
|
||||
- Project memory files are stored in `.deepagents/` inside the project root.
|
||||
- Always use absolute paths for file operations.
|
||||
- Determine if info is project-specific (check user vs project memory) before answering."""
|
||||
|
||||
|
||||
DEFAULT_MEMORY_SNIPPET = """<user_memory>
|
||||
{user_memory}
|
||||
</user_memory>
|
||||
|
||||
<project_memory>
|
||||
{project_memory}
|
||||
</project_memory>"""
|
||||
|
||||
|
||||
class AgentMemoryMiddleware(AgentMiddleware):
|
||||
"""에이전트별 장기 메모리를 로드하기 위한 미들웨어.
|
||||
|
||||
이 미들웨어는 파일(agent.md)에서 에이전트의 장기 메모리를 로드하고
|
||||
시스템 프롬프트에 주입합니다. 메모리는 대화 시작 시 한 번 로드되어
|
||||
상태에 저장됩니다.
|
||||
"""
|
||||
|
||||
state_schema = AgentMemoryState
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
settings: Settings,
|
||||
assistant_id: str,
|
||||
system_prompt_template: str | None = None,
|
||||
) -> None:
|
||||
"""에이전트 메모리 미들웨어를 초기화합니다.
|
||||
|
||||
Args:
|
||||
settings: 프로젝트 감지 및 경로가 포함된 전역 설정 인스턴스.
|
||||
assistant_id: 에이전트 식별자.
|
||||
system_prompt_template: 시스템 프롬프트에 에이전트 메모리를 주입하기 위한
|
||||
선택적 사용자 정의 템플릿.
|
||||
"""
|
||||
self.settings = settings
|
||||
self.assistant_id = assistant_id
|
||||
|
||||
# User paths
|
||||
self.agent_dir = settings.get_agent_dir(assistant_id)
|
||||
# Store both display path (with ~) and absolute path for file operations
|
||||
self.agent_dir_display = f"~/.deepagents/{assistant_id}"
|
||||
self.agent_dir_absolute = str(self.agent_dir)
|
||||
|
||||
# Project paths (from settings)
|
||||
self.project_root = settings.project_root
|
||||
|
||||
self.system_prompt_template = system_prompt_template or DEFAULT_MEMORY_SNIPPET
|
||||
|
||||
def before_agent(
|
||||
self,
|
||||
state: AgentMemoryState,
|
||||
runtime: Runtime,
|
||||
) -> AgentMemoryStateUpdate:
|
||||
"""에이전트 실행 전에 파일에서 에이전트 메모리를 로드합니다.
|
||||
|
||||
사용자 agent.md와 프로젝트별 agent.md가 있으면 로드합니다.
|
||||
상태에 아직 없는 경우에만 로드합니다.
|
||||
|
||||
사용자 업데이트를 포착하기 위해 매 호출마다 파일 존재 여부를 동적으로 확인합니다.
|
||||
|
||||
Args:
|
||||
state: 현재 에이전트 상태.
|
||||
runtime: 런타임 컨텍스트.
|
||||
|
||||
Returns:
|
||||
user_memory 및 project_memory가 채워진 업데이트된 상태.
|
||||
"""
|
||||
result: AgentMemoryStateUpdate = {}
|
||||
|
||||
# Load user memory if not already in state
|
||||
if "user_memory" not in state:
|
||||
user_path = self.settings.get_user_agent_md_path(self.assistant_id)
|
||||
if user_path.exists():
|
||||
with contextlib.suppress(OSError, UnicodeDecodeError):
|
||||
result["user_memory"] = user_path.read_text()
|
||||
|
||||
# Load project memory if not already in state
|
||||
if "project_memory" not in state:
|
||||
project_path = self.settings.get_project_agent_md_path()
|
||||
if project_path and project_path.exists():
|
||||
with contextlib.suppress(OSError, UnicodeDecodeError):
|
||||
result["project_memory"] = project_path.read_text()
|
||||
|
||||
return result
|
||||
|
||||
def _build_system_prompt(self, request: ModelRequest) -> str:
|
||||
"""메모리 섹션이 포함된 전체 시스템 프롬프트를 작성합니다.
|
||||
|
||||
Args:
|
||||
request: 상태 및 기본 시스템 프롬프트가 포함된 모델 요청.
|
||||
|
||||
Returns:
|
||||
메모리 섹션이 주입된 전체 시스템 프롬프트.
|
||||
"""
|
||||
# Extract memory from state
|
||||
state = cast("AgentMemoryState", request.state)
|
||||
user_memory = state.get("user_memory")
|
||||
project_memory = state.get("project_memory")
|
||||
base_system_prompt = request.system_prompt
|
||||
|
||||
# Build project memory info for documentation
|
||||
if self.project_root and project_memory:
|
||||
project_memory_info = f"`{self.project_root}` (detected)"
|
||||
elif self.project_root:
|
||||
project_memory_info = f"`{self.project_root}` (no agent.md found)"
|
||||
else:
|
||||
project_memory_info = "None (not in a git project)"
|
||||
|
||||
# Build project deepagents directory path
|
||||
if self.project_root:
|
||||
project_deepagents_dir = str(self.project_root / ".deepagents")
|
||||
else:
|
||||
project_deepagents_dir = "[project-root]/.deepagents (not in a project)"
|
||||
|
||||
# Format memory section with both memories
|
||||
memory_section = self.system_prompt_template.format(
|
||||
user_memory=user_memory if user_memory else "(No user agent.md)",
|
||||
project_memory=project_memory if project_memory else "(No project agent.md)",
|
||||
)
|
||||
|
||||
system_prompt = memory_section
|
||||
|
||||
if base_system_prompt:
|
||||
system_prompt += "\n\n" + base_system_prompt
|
||||
|
||||
system_prompt += "\n\n" + LONGTERM_MEMORY_SYSTEM_PROMPT.format(
|
||||
agent_dir_absolute=self.agent_dir_absolute,
|
||||
agent_dir_display=self.agent_dir_display,
|
||||
project_memory_info=project_memory_info,
|
||||
project_deepagents_dir=project_deepagents_dir,
|
||||
)
|
||||
|
||||
return system_prompt
|
||||
|
||||
def wrap_model_call(
|
||||
self,
|
||||
request: ModelRequest,
|
||||
handler: Callable[[ModelRequest], ModelResponse],
|
||||
) -> ModelResponse:
|
||||
"""시스템 프롬프트에 에이전트 메모리를 주입합니다.
|
||||
|
||||
Args:
|
||||
request: 처리 중인 모델 요청.
|
||||
handler: 수정된 요청으로 호출할 핸들러 함수.
|
||||
|
||||
Returns:
|
||||
핸들러의 모델 응답.
|
||||
"""
|
||||
system_prompt = self._build_system_prompt(request)
|
||||
return handler(request.override(system_prompt=system_prompt))
|
||||
|
||||
async def awrap_model_call(
|
||||
self,
|
||||
request: ModelRequest,
|
||||
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
|
||||
) -> ModelResponse:
|
||||
"""(비동기) 시스템 프롬프트에 에이전트 메모리를 주입합니다.
|
||||
|
||||
Args:
|
||||
request: 처리 중인 모델 요청.
|
||||
handler: 수정된 요청으로 호출할 핸들러 함수.
|
||||
|
||||
Returns:
|
||||
핸들러의 모델 응답.
|
||||
"""
|
||||
system_prompt = self._build_system_prompt(request)
|
||||
return await handler(request.override(system_prompt=system_prompt))
|
||||
665
deepagents_sourcecode/libs/deepagents-cli/deepagents_cli/app.py
Normal file
665
deepagents_sourcecode/libs/deepagents-cli/deepagents_cli/app.py
Normal file
@@ -0,0 +1,665 @@
|
||||
"""Textual UI application for deepagents-cli."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import contextlib
|
||||
import subprocess
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any, ClassVar
|
||||
|
||||
from textual.app import App
|
||||
from textual.binding import Binding, BindingType
|
||||
from textual.containers import Container, VerticalScroll
|
||||
from textual.css.query import NoMatches
|
||||
from textual.events import MouseUp # noqa: TC002 - used in type annotation
|
||||
from textual.widgets import Static # noqa: TC002 - used at runtime
|
||||
|
||||
from deepagents_cli.clipboard import copy_selection_to_clipboard
|
||||
from deepagents_cli.textual_adapter import TextualUIAdapter, execute_task_textual
|
||||
from deepagents_cli.widgets.approval import ApprovalMenu
|
||||
from deepagents_cli.widgets.chat_input import ChatInput
|
||||
from deepagents_cli.widgets.loading import LoadingWidget
|
||||
from deepagents_cli.widgets.messages import (
|
||||
AssistantMessage,
|
||||
ErrorMessage,
|
||||
SystemMessage,
|
||||
ToolCallMessage,
|
||||
UserMessage,
|
||||
)
|
||||
from deepagents_cli.widgets.status import StatusBar
|
||||
from deepagents_cli.widgets.welcome import WelcomeBanner
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from langgraph.pregel import Pregel
|
||||
from textual.app import ComposeResult
|
||||
from textual.worker import Worker
|
||||
|
||||
|
||||
class TextualTokenTracker:
|
||||
"""Token tracker that updates the status bar."""
|
||||
|
||||
def __init__(self, update_callback: callable) -> None:
|
||||
"""Initialize with a callback to update the display."""
|
||||
self._update_callback = update_callback
|
||||
self.current_context = 0
|
||||
|
||||
def add(self, input_tokens: int, output_tokens: int) -> None: # noqa: ARG002
|
||||
"""Update token count from a response."""
|
||||
self.current_context = input_tokens
|
||||
self._update_callback(input_tokens)
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Reset token count."""
|
||||
self.current_context = 0
|
||||
self._update_callback(0)
|
||||
|
||||
|
||||
class TextualSessionState:
|
||||
"""Session state for the Textual app."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
auto_approve: bool = False,
|
||||
thread_id: str | None = None,
|
||||
) -> None:
|
||||
"""Initialize session state.
|
||||
|
||||
Args:
|
||||
auto_approve: Whether to auto-approve tool calls
|
||||
thread_id: Optional thread ID (generates 8-char hex if not provided)
|
||||
"""
|
||||
self.auto_approve = auto_approve
|
||||
self.thread_id = thread_id if thread_id else uuid.uuid4().hex[:8]
|
||||
|
||||
def reset_thread(self) -> str:
|
||||
"""Reset to a new thread. Returns the new thread_id."""
|
||||
self.thread_id = uuid.uuid4().hex[:8]
|
||||
return self.thread_id
|
||||
|
||||
|
||||
class DeepAgentsApp(App):
|
||||
"""Main Textual application for deepagents-cli."""
|
||||
|
||||
TITLE = "DeepAgents"
|
||||
CSS_PATH = "app.tcss"
|
||||
ENABLE_COMMAND_PALETTE = False
|
||||
|
||||
# Slow down scroll speed (default is 3 lines per scroll event)
|
||||
# Using 0.25 to require 4 scroll events per line - very smooth
|
||||
SCROLL_SENSITIVITY_Y = 0.25
|
||||
|
||||
BINDINGS: ClassVar[list[BindingType]] = [
|
||||
Binding("escape", "interrupt", "Interrupt", show=False, priority=True),
|
||||
Binding("ctrl+c", "quit_or_interrupt", "Quit/Interrupt", show=False),
|
||||
Binding("ctrl+d", "quit_app", "Quit", show=False, priority=True),
|
||||
Binding("ctrl+t", "toggle_auto_approve", "Toggle Auto-Approve", show=False),
|
||||
Binding(
|
||||
"shift+tab", "toggle_auto_approve", "Toggle Auto-Approve", show=False, priority=True
|
||||
),
|
||||
Binding("ctrl+o", "toggle_tool_output", "Toggle Tool Output", show=False),
|
||||
# Approval menu keys (handled at App level for reliability)
|
||||
Binding("up", "approval_up", "Up", show=False),
|
||||
Binding("k", "approval_up", "Up", show=False),
|
||||
Binding("down", "approval_down", "Down", show=False),
|
||||
Binding("j", "approval_down", "Down", show=False),
|
||||
Binding("enter", "approval_select", "Select", show=False),
|
||||
Binding("y", "approval_yes", "Yes", show=False),
|
||||
Binding("1", "approval_yes", "Yes", show=False),
|
||||
Binding("n", "approval_no", "No", show=False),
|
||||
Binding("2", "approval_no", "No", show=False),
|
||||
Binding("a", "approval_auto", "Auto", show=False),
|
||||
Binding("3", "approval_auto", "Auto", show=False),
|
||||
]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
agent: Pregel | None = None,
|
||||
assistant_id: str | None = None,
|
||||
backend: Any = None, # noqa: ANN401 # CompositeBackend
|
||||
auto_approve: bool = False,
|
||||
cwd: str | Path | None = None,
|
||||
thread_id: str | None = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize the DeepAgents application.
|
||||
|
||||
Args:
|
||||
agent: Pre-configured LangGraph agent (optional for standalone mode)
|
||||
assistant_id: Agent identifier for memory storage
|
||||
backend: Backend for file operations
|
||||
auto_approve: Whether to start with auto-approve enabled
|
||||
cwd: Current working directory to display
|
||||
thread_id: Optional thread ID for session persistence
|
||||
**kwargs: Additional arguments passed to parent
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
self._agent = agent
|
||||
self._assistant_id = assistant_id
|
||||
self._backend = backend
|
||||
self._auto_approve = auto_approve
|
||||
self._cwd = str(cwd) if cwd else str(Path.cwd())
|
||||
# Avoid collision with App._thread_id
|
||||
self._lc_thread_id = thread_id
|
||||
self._status_bar: StatusBar | None = None
|
||||
self._chat_input: ChatInput | None = None
|
||||
self._quit_pending = False
|
||||
self._session_state: TextualSessionState | None = None
|
||||
self._ui_adapter: TextualUIAdapter | None = None
|
||||
self._pending_approval: asyncio.Future | None = None
|
||||
self._pending_approval_widget: Any = None
|
||||
# Agent task tracking for interruption
|
||||
self._agent_worker: Worker[None] | None = None
|
||||
self._agent_running = False
|
||||
self._loading_widget: LoadingWidget | None = None
|
||||
self._token_tracker: TextualTokenTracker | None = None
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
"""Compose the application layout."""
|
||||
# Main chat area with scrollable messages
|
||||
with VerticalScroll(id="chat"):
|
||||
yield WelcomeBanner(id="welcome-banner")
|
||||
yield Container(id="messages") # Container can have children mounted
|
||||
|
||||
# Bottom app container - holds either ChatInput OR ApprovalMenu (swapped)
|
||||
# This is OUTSIDE VerticalScroll so arrow keys work in approval
|
||||
with Container(id="bottom-app-container"):
|
||||
yield ChatInput(cwd=self._cwd, id="input-area")
|
||||
|
||||
# Status bar at bottom
|
||||
yield StatusBar(cwd=self._cwd, id="status-bar")
|
||||
|
||||
async def on_mount(self) -> None:
|
||||
"""Initialize components after mount."""
|
||||
self._status_bar = self.query_one("#status-bar", StatusBar)
|
||||
self._chat_input = self.query_one("#input-area", ChatInput)
|
||||
|
||||
# Set initial auto-approve state
|
||||
if self._auto_approve:
|
||||
self._status_bar.set_auto_approve(enabled=True)
|
||||
|
||||
# Create session state
|
||||
self._session_state = TextualSessionState(
|
||||
auto_approve=self._auto_approve,
|
||||
thread_id=self._lc_thread_id,
|
||||
)
|
||||
|
||||
# Create token tracker that updates status bar
|
||||
self._token_tracker = TextualTokenTracker(self._update_tokens)
|
||||
|
||||
# Create UI adapter if agent is provided
|
||||
if self._agent:
|
||||
self._ui_adapter = TextualUIAdapter(
|
||||
mount_message=self._mount_message,
|
||||
update_status=self._update_status,
|
||||
request_approval=self._request_approval,
|
||||
on_auto_approve_enabled=self._on_auto_approve_enabled,
|
||||
scroll_to_bottom=self._scroll_chat_to_bottom,
|
||||
)
|
||||
self._ui_adapter.set_token_tracker(self._token_tracker)
|
||||
|
||||
# Focus the input (autocomplete is now built into ChatInput)
|
||||
self._chat_input.focus_input()
|
||||
|
||||
def _update_status(self, message: str) -> None:
|
||||
"""Update the status bar with a message."""
|
||||
if self._status_bar:
|
||||
self._status_bar.set_status_message(message)
|
||||
|
||||
def _update_tokens(self, count: int) -> None:
|
||||
"""Update the token count in status bar."""
|
||||
if self._status_bar:
|
||||
self._status_bar.set_tokens(count)
|
||||
|
||||
def _scroll_chat_to_bottom(self) -> None:
|
||||
"""Scroll the chat area to the bottom.
|
||||
|
||||
Uses anchor() for smoother streaming - keeps scroll locked to bottom
|
||||
as new content is added without causing visual jumps.
|
||||
"""
|
||||
try:
|
||||
chat = self.query_one("#chat", VerticalScroll)
|
||||
# anchor() locks scroll to bottom and auto-scrolls as content grows
|
||||
# Much smoother than calling scroll_end() on every chunk
|
||||
chat.anchor()
|
||||
except NoMatches:
|
||||
pass
|
||||
|
||||
async def _request_approval(
|
||||
self,
|
||||
action_request: Any, # noqa: ANN401
|
||||
assistant_id: str | None,
|
||||
) -> asyncio.Future:
|
||||
"""Request user approval inline in the messages area.
|
||||
|
||||
Returns a Future that resolves to the user's decision.
|
||||
Mounts ApprovalMenu in the messages area (inline with chat).
|
||||
ChatInput stays visible - user can still see it.
|
||||
|
||||
If another approval is already pending, queue this one.
|
||||
"""
|
||||
loop = asyncio.get_running_loop()
|
||||
result_future: asyncio.Future = loop.create_future()
|
||||
|
||||
# If there's already a pending approval, wait for it to complete first
|
||||
if self._pending_approval_widget is not None:
|
||||
while self._pending_approval_widget is not None: # noqa: ASYNC110
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
# Create menu with unique ID to avoid conflicts
|
||||
unique_id = f"approval-menu-{uuid.uuid4().hex[:8]}"
|
||||
menu = ApprovalMenu(action_request, assistant_id, id=unique_id)
|
||||
menu.set_future(result_future)
|
||||
|
||||
# Store reference
|
||||
self._pending_approval_widget = menu
|
||||
|
||||
# Pause the loading spinner during approval
|
||||
if self._loading_widget:
|
||||
self._loading_widget.pause("Awaiting decision")
|
||||
|
||||
# Update status to show we're waiting for approval
|
||||
self._update_status("Waiting for approval...")
|
||||
|
||||
# Mount approval inline in messages area (not replacing ChatInput)
|
||||
try:
|
||||
messages = self.query_one("#messages", Container)
|
||||
await messages.mount(menu)
|
||||
self._scroll_chat_to_bottom()
|
||||
# Focus approval menu
|
||||
self.call_after_refresh(menu.focus)
|
||||
except Exception as e: # noqa: BLE001
|
||||
self._pending_approval_widget = None
|
||||
if not result_future.done():
|
||||
result_future.set_exception(e)
|
||||
|
||||
return result_future
|
||||
|
||||
def _on_auto_approve_enabled(self) -> None:
|
||||
"""Callback when auto-approve mode is enabled via HITL."""
|
||||
self._auto_approve = True
|
||||
if self._status_bar:
|
||||
self._status_bar.set_auto_approve(enabled=True)
|
||||
if self._session_state:
|
||||
self._session_state.auto_approve = True
|
||||
|
||||
async def on_chat_input_submitted(self, event: ChatInput.Submitted) -> None:
|
||||
"""Handle submitted input from ChatInput widget."""
|
||||
value = event.value
|
||||
mode = event.mode
|
||||
|
||||
# Reset quit pending state on any input
|
||||
self._quit_pending = False
|
||||
|
||||
# Handle different modes
|
||||
if mode == "bash":
|
||||
# Bash command - strip the ! prefix
|
||||
await self._handle_bash_command(value.removeprefix("!"))
|
||||
elif mode == "command":
|
||||
# Slash command
|
||||
await self._handle_command(value)
|
||||
else:
|
||||
# Normal message - will be sent to agent
|
||||
await self._handle_user_message(value)
|
||||
|
||||
def on_chat_input_mode_changed(self, event: ChatInput.ModeChanged) -> None:
|
||||
"""Update status bar when input mode changes."""
|
||||
if self._status_bar:
|
||||
self._status_bar.set_mode(event.mode)
|
||||
|
||||
async def on_approval_menu_decided(
|
||||
self,
|
||||
event: Any, # noqa: ANN401, ARG002
|
||||
) -> None:
|
||||
"""Handle approval menu decision - remove from messages and refocus input."""
|
||||
# Remove ApprovalMenu using stored reference
|
||||
if self._pending_approval_widget:
|
||||
await self._pending_approval_widget.remove()
|
||||
self._pending_approval_widget = None
|
||||
|
||||
# Resume the loading spinner after approval
|
||||
if self._loading_widget:
|
||||
self._loading_widget.resume()
|
||||
|
||||
# Clear status message
|
||||
self._update_status("")
|
||||
|
||||
# Refocus the chat input
|
||||
if self._chat_input:
|
||||
self.call_after_refresh(self._chat_input.focus_input)
|
||||
|
||||
async def _handle_bash_command(self, command: str) -> None:
|
||||
"""Handle a bash command (! prefix).
|
||||
|
||||
Args:
|
||||
command: The bash command to execute
|
||||
"""
|
||||
# Mount user message showing the bash command
|
||||
await self._mount_message(UserMessage(f"!{command}"))
|
||||
|
||||
# Execute the bash command (shell=True is intentional for user-requested bash)
|
||||
try:
|
||||
result = await asyncio.to_thread( # noqa: S604
|
||||
subprocess.run,
|
||||
command,
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd=self._cwd,
|
||||
timeout=60,
|
||||
)
|
||||
output = result.stdout.strip()
|
||||
if result.stderr:
|
||||
output += f"\n[stderr]\n{result.stderr.strip()}"
|
||||
|
||||
if output:
|
||||
# Display output as assistant message (uses markdown for code blocks)
|
||||
msg = AssistantMessage(f"```\n{output}\n```")
|
||||
await self._mount_message(msg)
|
||||
await msg.write_initial_content()
|
||||
else:
|
||||
await self._mount_message(SystemMessage("Command completed (no output)"))
|
||||
|
||||
if result.returncode != 0:
|
||||
await self._mount_message(ErrorMessage(f"Exit code: {result.returncode}"))
|
||||
|
||||
# Scroll to show the output
|
||||
self._scroll_chat_to_bottom()
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
await self._mount_message(ErrorMessage("Command timed out (60s limit)"))
|
||||
except OSError as e:
|
||||
await self._mount_message(ErrorMessage(str(e)))
|
||||
|
||||
async def _handle_command(self, command: str) -> None:
|
||||
"""Handle a slash command.
|
||||
|
||||
Args:
|
||||
command: The slash command (including /)
|
||||
"""
|
||||
cmd = command.lower().strip()
|
||||
|
||||
if cmd in ("/quit", "/exit", "/q"):
|
||||
self.exit()
|
||||
elif cmd == "/help":
|
||||
await self._mount_message(UserMessage(command))
|
||||
await self._mount_message(
|
||||
SystemMessage("Commands: /quit, /clear, /tokens, /threads, /help")
|
||||
)
|
||||
elif cmd == "/clear":
|
||||
await self._clear_messages()
|
||||
# Reset thread to start fresh conversation
|
||||
if self._session_state:
|
||||
new_thread_id = self._session_state.reset_thread()
|
||||
await self._mount_message(SystemMessage(f"Started new session: {new_thread_id}"))
|
||||
elif cmd == "/threads":
|
||||
await self._mount_message(UserMessage(command))
|
||||
if self._session_state:
|
||||
await self._mount_message(
|
||||
SystemMessage(f"Current session: {self._session_state.thread_id}")
|
||||
)
|
||||
else:
|
||||
await self._mount_message(SystemMessage("No active session"))
|
||||
elif cmd == "/tokens":
|
||||
await self._mount_message(UserMessage(command))
|
||||
if self._token_tracker and self._token_tracker.current_context > 0:
|
||||
count = self._token_tracker.current_context
|
||||
if count >= 1000:
|
||||
formatted = f"{count / 1000:.1f}K"
|
||||
else:
|
||||
formatted = str(count)
|
||||
await self._mount_message(SystemMessage(f"Current context: {formatted} tokens"))
|
||||
else:
|
||||
await self._mount_message(SystemMessage("No token usage yet"))
|
||||
else:
|
||||
await self._mount_message(UserMessage(command))
|
||||
await self._mount_message(SystemMessage(f"Unknown command: {cmd}"))
|
||||
|
||||
async def _handle_user_message(self, message: str) -> None:
|
||||
"""Handle a user message to send to the agent.
|
||||
|
||||
Args:
|
||||
message: The user's message
|
||||
"""
|
||||
# Mount the user message
|
||||
await self._mount_message(UserMessage(message))
|
||||
|
||||
# Check if agent is available
|
||||
if self._agent and self._ui_adapter and self._session_state:
|
||||
# Show loading widget
|
||||
self._loading_widget = LoadingWidget("Thinking")
|
||||
await self._mount_message(self._loading_widget)
|
||||
self._agent_running = True
|
||||
|
||||
# Disable cursor blink while agent is working
|
||||
if self._chat_input:
|
||||
self._chat_input.set_cursor_active(active=False)
|
||||
|
||||
# Use run_worker to avoid blocking the main event loop
|
||||
# This allows the UI to remain responsive during agent execution
|
||||
self._agent_worker = self.run_worker(
|
||||
self._run_agent_task(message),
|
||||
exclusive=False,
|
||||
)
|
||||
else:
|
||||
await self._mount_message(
|
||||
SystemMessage("Agent not configured. Run with --agent flag or use standalone mode.")
|
||||
)
|
||||
|
||||
async def _run_agent_task(self, message: str) -> None:
|
||||
"""Run the agent task in a background worker.
|
||||
|
||||
This runs in a worker thread so the main event loop stays responsive.
|
||||
"""
|
||||
try:
|
||||
await execute_task_textual(
|
||||
user_input=message,
|
||||
agent=self._agent,
|
||||
assistant_id=self._assistant_id,
|
||||
session_state=self._session_state,
|
||||
adapter=self._ui_adapter,
|
||||
backend=self._backend,
|
||||
)
|
||||
except Exception as e: # noqa: BLE001
|
||||
await self._mount_message(ErrorMessage(f"Agent error: {e}"))
|
||||
finally:
|
||||
# Clean up loading widget and agent state
|
||||
await self._cleanup_agent_task()
|
||||
|
||||
async def _cleanup_agent_task(self) -> None:
|
||||
"""Clean up after agent task completes or is cancelled."""
|
||||
self._agent_running = False
|
||||
self._agent_worker = None
|
||||
|
||||
# Remove loading widget if present
|
||||
if self._loading_widget:
|
||||
with contextlib.suppress(Exception):
|
||||
await self._loading_widget.remove()
|
||||
self._loading_widget = None
|
||||
|
||||
# Re-enable cursor blink now that agent is done
|
||||
if self._chat_input:
|
||||
self._chat_input.set_cursor_active(active=True)
|
||||
|
||||
async def _mount_message(self, widget: Static) -> None:
|
||||
"""Mount a message widget to the messages area.
|
||||
|
||||
Args:
|
||||
widget: The message widget to mount
|
||||
"""
|
||||
try:
|
||||
messages = self.query_one("#messages", Container)
|
||||
await messages.mount(widget)
|
||||
# Scroll to bottom
|
||||
chat = self.query_one("#chat", VerticalScroll)
|
||||
chat.scroll_end(animate=False)
|
||||
except NoMatches:
|
||||
pass
|
||||
|
||||
async def _clear_messages(self) -> None:
|
||||
"""Clear the messages area."""
|
||||
try:
|
||||
messages = self.query_one("#messages", Container)
|
||||
await messages.remove_children()
|
||||
except NoMatches:
|
||||
# Widget not found - can happen during shutdown
|
||||
pass
|
||||
|
||||
def action_quit_or_interrupt(self) -> None:
|
||||
"""Handle Ctrl+C - interrupt agent, reject approval, or quit on double press.
|
||||
|
||||
Priority order:
|
||||
1. If agent is running, interrupt it (preserve input)
|
||||
2. If approval menu is active, reject it
|
||||
3. If double press (quit_pending), quit
|
||||
4. Otherwise show quit hint
|
||||
"""
|
||||
# If agent is running, interrupt it
|
||||
if self._agent_running and self._agent_worker:
|
||||
self._agent_worker.cancel()
|
||||
self._quit_pending = False
|
||||
return
|
||||
|
||||
# If approval menu is active, reject it
|
||||
if self._pending_approval_widget:
|
||||
self._pending_approval_widget.action_select_reject()
|
||||
self._quit_pending = False
|
||||
return
|
||||
|
||||
# Double Ctrl+C to quit
|
||||
if self._quit_pending:
|
||||
self.exit()
|
||||
else:
|
||||
self._quit_pending = True
|
||||
self.notify("Press Ctrl+C again to quit", timeout=3)
|
||||
|
||||
def action_interrupt(self) -> None:
|
||||
"""Handle escape key - interrupt agent or reject approval.
|
||||
|
||||
This is the primary way to stop a running agent.
|
||||
"""
|
||||
# If agent is running, interrupt it
|
||||
if self._agent_running and self._agent_worker:
|
||||
self._agent_worker.cancel()
|
||||
return
|
||||
|
||||
# If approval menu is active, reject it
|
||||
if self._pending_approval_widget:
|
||||
self._pending_approval_widget.action_select_reject()
|
||||
|
||||
def action_quit_app(self) -> None:
|
||||
"""Handle quit action (Ctrl+D)."""
|
||||
self.exit()
|
||||
|
||||
def action_toggle_auto_approve(self) -> None:
|
||||
"""Toggle auto-approve mode."""
|
||||
self._auto_approve = not self._auto_approve
|
||||
if self._status_bar:
|
||||
self._status_bar.set_auto_approve(enabled=self._auto_approve)
|
||||
if self._session_state:
|
||||
self._session_state.auto_approve = self._auto_approve
|
||||
|
||||
def action_toggle_tool_output(self) -> None:
|
||||
"""Toggle expand/collapse of the most recent tool output."""
|
||||
# Find all tool messages with output, get the most recent one
|
||||
try:
|
||||
tool_messages = list(self.query(ToolCallMessage))
|
||||
# Find ones with output, toggle the most recent
|
||||
for tool_msg in reversed(tool_messages):
|
||||
if tool_msg.has_output:
|
||||
tool_msg.toggle_output()
|
||||
return
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Approval menu action handlers (delegated from App-level bindings)
|
||||
# NOTE: These only activate when approval widget is pending AND input is not focused
|
||||
def action_approval_up(self) -> None:
|
||||
"""Handle up arrow in approval menu."""
|
||||
# Only handle if approval is active (input handles its own up for history/completion)
|
||||
if self._pending_approval_widget and not self._is_input_focused():
|
||||
self._pending_approval_widget.action_move_up()
|
||||
|
||||
def action_approval_down(self) -> None:
|
||||
"""Handle down arrow in approval menu."""
|
||||
if self._pending_approval_widget and not self._is_input_focused():
|
||||
self._pending_approval_widget.action_move_down()
|
||||
|
||||
def action_approval_select(self) -> None:
|
||||
"""Handle enter in approval menu."""
|
||||
# Only handle if approval is active AND input is not focused
|
||||
if self._pending_approval_widget and not self._is_input_focused():
|
||||
self._pending_approval_widget.action_select()
|
||||
|
||||
def _is_input_focused(self) -> bool:
|
||||
"""Check if the chat input (or its text area) has focus."""
|
||||
if not self._chat_input:
|
||||
return False
|
||||
focused = self.focused
|
||||
if focused is None:
|
||||
return False
|
||||
# Check if focused widget is the text area inside chat input
|
||||
return focused.id == "chat-input" or focused in self._chat_input.walk_children()
|
||||
|
||||
def action_approval_yes(self) -> None:
|
||||
"""Handle yes/1 in approval menu."""
|
||||
if self._pending_approval_widget:
|
||||
self._pending_approval_widget.action_select_approve()
|
||||
|
||||
def action_approval_no(self) -> None:
|
||||
"""Handle no/2 in approval menu."""
|
||||
if self._pending_approval_widget:
|
||||
self._pending_approval_widget.action_select_reject()
|
||||
|
||||
def action_approval_auto(self) -> None:
|
||||
"""Handle auto/3 in approval menu."""
|
||||
if self._pending_approval_widget:
|
||||
self._pending_approval_widget.action_select_auto()
|
||||
|
||||
def action_approval_escape(self) -> None:
|
||||
"""Handle escape in approval menu - reject."""
|
||||
if self._pending_approval_widget:
|
||||
self._pending_approval_widget.action_select_reject()
|
||||
|
||||
def on_mouse_up(self, event: MouseUp) -> None: # noqa: ARG002
|
||||
"""Copy selection to clipboard on mouse release."""
|
||||
copy_selection_to_clipboard(self)
|
||||
|
||||
|
||||
async def run_textual_app(
|
||||
*,
|
||||
agent: Pregel | None = None,
|
||||
assistant_id: str | None = None,
|
||||
backend: Any = None, # noqa: ANN401 # CompositeBackend
|
||||
auto_approve: bool = False,
|
||||
cwd: str | Path | None = None,
|
||||
thread_id: str | None = None,
|
||||
) -> None:
|
||||
"""Run the Textual application.
|
||||
|
||||
Args:
|
||||
agent: Pre-configured LangGraph agent (optional)
|
||||
assistant_id: Agent identifier for memory storage
|
||||
backend: Backend for file operations
|
||||
auto_approve: Whether to start with auto-approve enabled
|
||||
cwd: Current working directory to display
|
||||
thread_id: Optional thread ID for session persistence
|
||||
"""
|
||||
app = DeepAgentsApp(
|
||||
agent=agent,
|
||||
assistant_id=assistant_id,
|
||||
backend=backend,
|
||||
auto_approve=auto_approve,
|
||||
cwd=cwd,
|
||||
thread_id=thread_id,
|
||||
)
|
||||
await app.run_async()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import asyncio
|
||||
|
||||
asyncio.run(run_textual_app())
|
||||
@@ -0,0 +1,167 @@
|
||||
/* DeepAgents CLI Textual Stylesheet */
|
||||
|
||||
/* Define layers for z-ordering */
|
||||
Screen {
|
||||
layout: vertical;
|
||||
layers: base autocomplete;
|
||||
}
|
||||
|
||||
/* Main content goes on base layer by default */
|
||||
|
||||
/* Chat area - main scrollable messages area */
|
||||
#chat {
|
||||
height: 1fr;
|
||||
padding: 1 2;
|
||||
background: $background;
|
||||
}
|
||||
|
||||
/* Welcome banner */
|
||||
#welcome-banner {
|
||||
height: auto;
|
||||
margin-bottom: 1;
|
||||
}
|
||||
|
||||
/* Messages area */
|
||||
#messages {
|
||||
height: auto;
|
||||
}
|
||||
|
||||
/* Bottom app container - holds ChatInput OR ApprovalMenu */
|
||||
#bottom-app-container {
|
||||
height: auto;
|
||||
dock: bottom;
|
||||
margin-bottom: 3;
|
||||
padding: 0 1;
|
||||
}
|
||||
|
||||
/* Input area */
|
||||
#input-area {
|
||||
height: auto;
|
||||
min-height: 3;
|
||||
max-height: 12;
|
||||
}
|
||||
|
||||
/* Approval Menu - inline in messages area */
|
||||
.approval-menu {
|
||||
height: auto;
|
||||
margin: 1 0;
|
||||
padding: 1 2;
|
||||
background: $surface;
|
||||
border: solid $warning;
|
||||
}
|
||||
|
||||
.approval-menu .approval-title {
|
||||
text-style: bold;
|
||||
color: $warning;
|
||||
margin-bottom: 1;
|
||||
}
|
||||
|
||||
.approval-menu .approval-info {
|
||||
height: auto;
|
||||
color: $text-muted;
|
||||
margin-bottom: 1;
|
||||
}
|
||||
|
||||
.approval-menu .approval-option {
|
||||
height: 1;
|
||||
padding: 0 1;
|
||||
}
|
||||
|
||||
.approval-menu .approval-option-selected {
|
||||
background: $primary;
|
||||
text-style: bold;
|
||||
}
|
||||
|
||||
.approval-menu .approval-help {
|
||||
color: $text-muted;
|
||||
text-style: italic;
|
||||
margin-top: 0;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
/* Status bar */
|
||||
#status-bar {
|
||||
height: 1;
|
||||
dock: bottom;
|
||||
margin-bottom: 1;
|
||||
}
|
||||
|
||||
/* Tool approval widgets */
|
||||
.tool-approval-widget {
|
||||
height: auto;
|
||||
max-height: 20;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
.approval-file-path {
|
||||
color: $primary;
|
||||
text-style: bold;
|
||||
}
|
||||
|
||||
.approval-description {
|
||||
color: $text-muted;
|
||||
}
|
||||
|
||||
/* Diff styling */
|
||||
.diff-header {
|
||||
height: auto;
|
||||
color: $text-muted;
|
||||
}
|
||||
|
||||
.diff-removed {
|
||||
height: auto;
|
||||
color: #ff6b6b;
|
||||
background: #3d1f1f;
|
||||
}
|
||||
|
||||
.diff-added {
|
||||
height: auto;
|
||||
color: #69db7c;
|
||||
background: #1f3d1f;
|
||||
}
|
||||
|
||||
.diff-range {
|
||||
height: auto;
|
||||
color: $primary;
|
||||
text-style: bold;
|
||||
}
|
||||
|
||||
.diff-context {
|
||||
height: auto;
|
||||
color: $text-muted;
|
||||
}
|
||||
|
||||
/* Separator line between options and tool details */
|
||||
.approval-menu .approval-separator {
|
||||
height: 1;
|
||||
color: $warning;
|
||||
margin: 1 0 0 0;
|
||||
}
|
||||
|
||||
/* Scrollable tool info area in approval menu - at bottom */
|
||||
.approval-menu .tool-info-scroll {
|
||||
height: auto;
|
||||
max-height: 15;
|
||||
overflow-y: auto;
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
/* Options container with background */
|
||||
.approval-menu .approval-options-container {
|
||||
height: auto;
|
||||
background: $surface-darken-1;
|
||||
padding: 1;
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
/* Completion popup styling (used by ChatInput) - appears BELOW input */
|
||||
#completion-popup {
|
||||
height: auto;
|
||||
max-height: 12;
|
||||
width: 100%;
|
||||
margin-left: 3;
|
||||
margin-top: 0;
|
||||
padding: 0;
|
||||
background: $background;
|
||||
color: $text;
|
||||
}
|
||||
@@ -0,0 +1,94 @@
|
||||
"""Clipboard utilities for deepagents-cli."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import os
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from textual.app import App
|
||||
|
||||
_PREVIEW_MAX_LENGTH = 40
|
||||
|
||||
|
||||
def _copy_osc52(text: str) -> None:
|
||||
"""Copy text using OSC 52 escape sequence (works over SSH/tmux)."""
|
||||
encoded = base64.b64encode(text.encode("utf-8")).decode("ascii")
|
||||
osc52_seq = f"\033]52;c;{encoded}\a"
|
||||
if os.environ.get("TMUX"):
|
||||
osc52_seq = f"\033Ptmux;\033{osc52_seq}\033\\"
|
||||
|
||||
with open("/dev/tty", "w") as tty:
|
||||
tty.write(osc52_seq)
|
||||
tty.flush()
|
||||
|
||||
|
||||
def _shorten_preview(texts: list[str]) -> str:
|
||||
"""Shorten text for notification preview."""
|
||||
dense_text = "⏎".join(texts).replace("\n", "⏎")
|
||||
if len(dense_text) > _PREVIEW_MAX_LENGTH:
|
||||
return f"{dense_text[: _PREVIEW_MAX_LENGTH - 1]}…"
|
||||
return dense_text
|
||||
|
||||
|
||||
def copy_selection_to_clipboard(app: App) -> None:
|
||||
"""Copy selected text from app widgets to clipboard.
|
||||
|
||||
This queries all widgets for their text_selection and copies
|
||||
any selected text to the system clipboard.
|
||||
"""
|
||||
selected_texts = []
|
||||
|
||||
for widget in app.query("*"):
|
||||
if not hasattr(widget, "text_selection") or not widget.text_selection:
|
||||
continue
|
||||
|
||||
selection = widget.text_selection
|
||||
|
||||
try:
|
||||
result = widget.get_selection(selection)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
if not result:
|
||||
continue
|
||||
|
||||
selected_text, _ = result
|
||||
if selected_text.strip():
|
||||
selected_texts.append(selected_text)
|
||||
|
||||
if not selected_texts:
|
||||
return
|
||||
|
||||
combined_text = "\n".join(selected_texts)
|
||||
|
||||
# Try multiple clipboard methods
|
||||
copy_methods = [_copy_osc52, app.copy_to_clipboard]
|
||||
|
||||
# Try pyperclip if available
|
||||
try:
|
||||
import pyperclip
|
||||
|
||||
copy_methods.insert(1, pyperclip.copy)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
for copy_fn in copy_methods:
|
||||
try:
|
||||
copy_fn(combined_text)
|
||||
app.notify(
|
||||
f'"{_shorten_preview(selected_texts)}" copied',
|
||||
severity="information",
|
||||
timeout=2,
|
||||
)
|
||||
return
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
# If all methods fail, still notify but warn
|
||||
app.notify(
|
||||
"Failed to copy - no clipboard method available",
|
||||
severity="warning",
|
||||
timeout=3,
|
||||
)
|
||||
@@ -1,87 +0,0 @@
|
||||
"""슬래시 명령 및 bash 실행을 위한 명령 처리기."""
|
||||
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
from langgraph.checkpoint.memory import InMemorySaver
|
||||
|
||||
from .config import COLORS, DEEP_AGENTS_ASCII, console
|
||||
from .ui import TokenTracker, show_interactive_help
|
||||
|
||||
|
||||
def handle_command(command: str, agent, token_tracker: TokenTracker) -> str | bool:
|
||||
"""슬래시 명령을 처리합니다. 종료하려면 'exit', 처리된 경우 True, 에이전트에게 전달하려면 False를 반환합니다."""
|
||||
cmd = command.lower().strip().lstrip("/")
|
||||
|
||||
if cmd in ["quit", "exit", "q"]:
|
||||
return "exit"
|
||||
|
||||
if cmd == "clear":
|
||||
# Reset agent conversation state
|
||||
agent.checkpointer = InMemorySaver()
|
||||
|
||||
# Reset token tracking to baseline
|
||||
token_tracker.reset()
|
||||
|
||||
# Clear screen and show fresh UI
|
||||
console.clear()
|
||||
console.print(DEEP_AGENTS_ASCII, style=f"bold {COLORS['primary']}")
|
||||
console.print()
|
||||
console.print("... 새로 시작! 화면이 지워지고 대화가 초기화되었습니다.", style=COLORS["agent"])
|
||||
console.print()
|
||||
return True
|
||||
|
||||
if cmd == "help":
|
||||
show_interactive_help()
|
||||
return True
|
||||
|
||||
if cmd == "tokens":
|
||||
token_tracker.display_session()
|
||||
return True
|
||||
|
||||
console.print()
|
||||
console.print(f"[yellow]알 수 없는 명령: /{cmd}[/yellow]")
|
||||
console.print("[dim]사용 가능한 명령을 보려면 /help를 입력하세요.[/dim]")
|
||||
console.print()
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def execute_bash_command(command: str) -> bool:
|
||||
"""bash 명령을 실행하고 출력을 표시합니다. 처리된 경우 True를 반환합니다."""
|
||||
cmd = command.strip().lstrip("!")
|
||||
|
||||
if not cmd:
|
||||
return True
|
||||
|
||||
try:
|
||||
console.print()
|
||||
console.print(f"[dim]$ {cmd}[/dim]")
|
||||
|
||||
# Execute the command
|
||||
result = subprocess.run(
|
||||
cmd, check=False, shell=True, capture_output=True, text=True, timeout=30, cwd=Path.cwd()
|
||||
)
|
||||
|
||||
# Display output
|
||||
if result.stdout:
|
||||
console.print(result.stdout, style=COLORS["dim"], markup=False)
|
||||
if result.stderr:
|
||||
console.print(result.stderr, style="red", markup=False)
|
||||
|
||||
# Show return code if non-zero
|
||||
if result.returncode != 0:
|
||||
console.print(f"[dim]Exit code: {result.returncode}[/dim]")
|
||||
|
||||
console.print()
|
||||
return True
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
console.print("[red]30초 후 명령 시간 초과[/red]")
|
||||
console.print()
|
||||
return True
|
||||
except Exception as e:
|
||||
console.print(f"[red]명령 실행 오류: {e}[/red]")
|
||||
console.print()
|
||||
return True
|
||||
@@ -1,4 +1,4 @@
|
||||
"""CLI를 위한 구성, 상수 밎 모델 생성."""
|
||||
"""Configuration, constants, and model creation for the CLI."""
|
||||
|
||||
import os
|
||||
import re
|
||||
@@ -55,14 +55,13 @@ DEEP_AGENTS_ASCII = f"""
|
||||
v{__version__}
|
||||
"""
|
||||
|
||||
# Interactive commands
|
||||
# Interactive commands
|
||||
COMMANDS = {
|
||||
"clear": "화면을 지우고 대화를 재설정합니다",
|
||||
"help": "도움말 정보를 표시합니다",
|
||||
"tokens": "현재 세션의 토큰 사용량을 표시합니다",
|
||||
"quit": "CLI를 종료합니다",
|
||||
"exit": "CLI를 종료합니다",
|
||||
"clear": "Clear screen and reset conversation",
|
||||
"help": "Show help information",
|
||||
"tokens": "Show token usage for current session",
|
||||
"quit": "Exit the CLI",
|
||||
"exit": "Exit the CLI",
|
||||
}
|
||||
|
||||
|
||||
@@ -77,16 +76,16 @@ console = Console(highlight=False)
|
||||
|
||||
|
||||
def _find_project_root(start_path: Path | None = None) -> Path | None:
|
||||
"""git 디렉터리를 찾아 프로젝트 루트를 찾습니다.
|
||||
"""Find the project root by looking for .git directory.
|
||||
|
||||
start_path(또는 cwd)에서 디렉터리 트리를 따라 올라가며 프로젝트 루트를 나타내는
|
||||
.git 디렉터리를 찾습니다.
|
||||
Walks up the directory tree from start_path (or cwd) looking for a .git
|
||||
directory, which indicates the project root.
|
||||
|
||||
Args:
|
||||
start_path: 검색을 시작할 디렉터리. 기본값은 현재 작업 디렉터리입니다.
|
||||
start_path: Directory to start searching from. Defaults to current working directory.
|
||||
|
||||
Returns:
|
||||
찾은 경우 프로젝트 루트의 경로, 그렇지 않으면 None입니다.
|
||||
Path to the project root if found, None otherwise.
|
||||
"""
|
||||
current = Path(start_path or Path.cwd()).resolve()
|
||||
|
||||
@@ -100,29 +99,29 @@ def _find_project_root(start_path: Path | None = None) -> Path | None:
|
||||
|
||||
|
||||
def _find_project_agent_md(project_root: Path) -> list[Path]:
|
||||
"""프로젝트별 agent.md 파일(들)을 찾습니다.
|
||||
"""Find project-specific AGENTS.md file(s).
|
||||
|
||||
두 위치를 확인하고 존재하는 모든 위치를 반환합니다:
|
||||
1. project_root/.deepagents/agent.md
|
||||
2. project_root/agent.md
|
||||
Checks two locations and returns ALL that exist:
|
||||
1. project_root/.deepagents/AGENTS.md
|
||||
2. project_root/AGENTS.md
|
||||
|
||||
두 파일이 모두 존재하면 둘 다 로드되어 결합됩니다.
|
||||
Both files will be loaded and combined if both exist.
|
||||
|
||||
Args:
|
||||
project_root: 프로젝트 루트 디렉터리 경로.
|
||||
project_root: Path to the project root directory.
|
||||
|
||||
Returns:
|
||||
프로젝트 agent.md 파일 경로 목록 (0, 1 또는 2개의 경로를 포함할 수 있음).
|
||||
List of paths to project AGENTS.md files (may contain 0, 1, or 2 paths).
|
||||
"""
|
||||
paths = []
|
||||
|
||||
# Check .deepagents/agent.md (preferred)
|
||||
deepagents_md = project_root / ".deepagents" / "agent.md"
|
||||
# Check .deepagents/AGENTS.md (preferred)
|
||||
deepagents_md = project_root / ".deepagents" / "AGENTS.md"
|
||||
if deepagents_md.exists():
|
||||
paths.append(deepagents_md)
|
||||
|
||||
# Check root agent.md (fallback, but also include if both exist)
|
||||
root_md = project_root / "agent.md"
|
||||
# Check root AGENTS.md (fallback, but also include if both exist)
|
||||
root_md = project_root / "AGENTS.md"
|
||||
if root_md.exists():
|
||||
paths.append(root_md)
|
||||
|
||||
@@ -131,22 +130,22 @@ def _find_project_agent_md(project_root: Path) -> list[Path]:
|
||||
|
||||
@dataclass
|
||||
class Settings:
|
||||
"""DeepAgents-cli를 위한 전역 설정 및 환경 감지.
|
||||
"""Global settings and environment detection for deepagents-cli.
|
||||
|
||||
이 클래스는 시작 시 한 번 초기화되며 다음 정보에 대한 액세스를 제공합니다:
|
||||
- 사용 가능한 모델 및 API 키
|
||||
- 현재 프로젝트 정보
|
||||
- 도구 가용성 (예: Tavily)
|
||||
- 파일 시스템 경로
|
||||
This class is initialized once at startup and provides access to:
|
||||
- Available models and API keys
|
||||
- Current project information
|
||||
- Tool availability (e.g., Tavily)
|
||||
- File system paths
|
||||
|
||||
Attributes:
|
||||
project_root: 현재 프로젝트 루트 디렉터리 (git 프로젝트 내인 경우)
|
||||
project_root: Current project root directory (if in a git project)
|
||||
|
||||
openai_api_key: OpenAI API 키 (사용 가능한 경우)
|
||||
anthropic_api_key: Anthropic API 키 (사용 가능한 경우)
|
||||
tavily_api_key: Tavily API 키 (사용 가능한 경우)
|
||||
deepagents_langchain_project: DeepAgents 에이전트 추적을 위한 LangSmith 프로젝트 이름
|
||||
user_langchain_project: 환경의 원래 LANGSMITH_PROJECT (사용자 코드용)
|
||||
openai_api_key: OpenAI API key if available
|
||||
anthropic_api_key: Anthropic API key if available
|
||||
tavily_api_key: Tavily API key if available
|
||||
deepagents_langchain_project: LangSmith project name for deepagents agent tracing
|
||||
user_langchain_project: Original LANGSMITH_PROJECT from environment (for user code)
|
||||
"""
|
||||
|
||||
# API keys
|
||||
@@ -168,13 +167,13 @@ class Settings:
|
||||
|
||||
@classmethod
|
||||
def from_environment(cls, *, start_path: Path | None = None) -> "Settings":
|
||||
"""현재 환경을 감지하여 설정을 생성합니다.
|
||||
"""Create settings by detecting the current environment.
|
||||
|
||||
Args:
|
||||
start_path: 프로젝트 감지를 시작할 디렉터리(기본값은 cwd)
|
||||
start_path: Directory to start project detection from (defaults to cwd)
|
||||
|
||||
Returns:
|
||||
감지된 구성이 포함된 Settings 인스턴스
|
||||
Settings instance with detected configuration
|
||||
"""
|
||||
# Detect API keys
|
||||
openai_key = os.environ.get("OPENAI_API_KEY")
|
||||
@@ -205,84 +204,84 @@ class Settings:
|
||||
|
||||
@property
|
||||
def has_openai(self) -> bool:
|
||||
"""OpenAI API 키가 구성되어 있는지 확인합니다."""
|
||||
"""Check if OpenAI API key is configured."""
|
||||
return self.openai_api_key is not None
|
||||
|
||||
@property
|
||||
def has_anthropic(self) -> bool:
|
||||
"""Anthropic API 키가 구성되어 있는지 확인합니다."""
|
||||
"""Check if Anthropic API key is configured."""
|
||||
return self.anthropic_api_key is not None
|
||||
|
||||
@property
|
||||
def has_google(self) -> bool:
|
||||
"""Google API 키가 구성되어 있는지 확인합니다."""
|
||||
"""Check if Google API key is configured."""
|
||||
return self.google_api_key is not None
|
||||
|
||||
@property
|
||||
def has_tavily(self) -> bool:
|
||||
"""Tavily API 키가 구성되어 있는지 확인합니다."""
|
||||
"""Check if Tavily API key is configured."""
|
||||
return self.tavily_api_key is not None
|
||||
|
||||
@property
|
||||
def has_deepagents_langchain_project(self) -> bool:
|
||||
"""DeepAgents LangChain 프로젝트 이름이 구성되어 있는지 확인합니다."""
|
||||
"""Check if deepagents LangChain project name is configured."""
|
||||
return self.deepagents_langchain_project is not None
|
||||
|
||||
@property
|
||||
def has_project(self) -> bool:
|
||||
"""현재 git 프로젝트 내에 있는지 확인합니다."""
|
||||
"""Check if currently in a git project."""
|
||||
return self.project_root is not None
|
||||
|
||||
@property
|
||||
def user_deepagents_dir(self) -> Path:
|
||||
"""기본 사용자 수준 .deepagents 디렉터리를 가져옵니다.
|
||||
"""Get the base user-level .deepagents directory.
|
||||
|
||||
Returns:
|
||||
~/.deepagents 경로
|
||||
Path to ~/.deepagents
|
||||
"""
|
||||
return Path.home() / ".deepagents"
|
||||
|
||||
def get_user_agent_md_path(self, agent_name: str) -> Path:
|
||||
"""특정 에이전트에 대한 사용자 수준 agent.md 경로를 가져옵니다.
|
||||
"""Get user-level AGENTS.md path for a specific agent.
|
||||
|
||||
파일 존재 여부와 상관없이 경로를 반환합니다.
|
||||
Returns path regardless of whether the file exists.
|
||||
|
||||
Args:
|
||||
agent_name: 에이전트 이름
|
||||
agent_name: Name of the agent
|
||||
|
||||
Returns:
|
||||
~/.deepagents/{agent_name}/agent.md 경로
|
||||
Path to ~/.deepagents/{agent_name}/AGENTS.md
|
||||
"""
|
||||
return Path.home() / ".deepagents" / agent_name / "agent.md"
|
||||
return Path.home() / ".deepagents" / agent_name / "AGENTS.md"
|
||||
|
||||
def get_project_agent_md_path(self) -> Path | None:
|
||||
"""프로젝트 수준 agent.md 경로를 가져옵니다.
|
||||
"""Get project-level AGENTS.md path.
|
||||
|
||||
파일 존재 여부와 상관없이 경로를 반환합니다.
|
||||
Returns path regardless of whether the file exists.
|
||||
|
||||
Returns:
|
||||
{project_root}/.deepagents/agent.md 경로, 프로젝트 내에 없는 경우 None
|
||||
Path to {project_root}/.deepagents/AGENTS.md, or None if not in a project
|
||||
"""
|
||||
if not self.project_root:
|
||||
return None
|
||||
return self.project_root / ".deepagents" / "agent.md"
|
||||
return self.project_root / ".deepagents" / "AGENTS.md"
|
||||
|
||||
@staticmethod
|
||||
def _is_valid_agent_name(agent_name: str) -> bool:
|
||||
"""유효하지 않은 파일시스템 경로 및 보안 문제를 방지하기 위해 검증합니다."""
|
||||
"""Validate prevent invalid filesystem paths and security issues."""
|
||||
if not agent_name or not agent_name.strip():
|
||||
return False
|
||||
# Allow only alphanumeric, hyphens, underscores, and whitespace
|
||||
return bool(re.match(r"^[a-zA-Z0-9_\-\s]+$", agent_name))
|
||||
|
||||
def get_agent_dir(self, agent_name: str) -> Path:
|
||||
"""전역 에이전트 디렉터리 경로를 가져옵니다.
|
||||
"""Get the global agent directory path.
|
||||
|
||||
Args:
|
||||
agent_name: 에이전트 이름
|
||||
agent_name: Name of the agent
|
||||
|
||||
Returns:
|
||||
~/.deepagents/{agent_name} 경로
|
||||
Path to ~/.deepagents/{agent_name}
|
||||
"""
|
||||
if not self._is_valid_agent_name(agent_name):
|
||||
msg = (
|
||||
@@ -293,13 +292,13 @@ class Settings:
|
||||
return Path.home() / ".deepagents" / agent_name
|
||||
|
||||
def ensure_agent_dir(self, agent_name: str) -> Path:
|
||||
"""전역 에이전트 디렉터리가 존재하는지 확인하고 경로를 반환합니다.
|
||||
"""Ensure the global agent directory exists and return its path.
|
||||
|
||||
Args:
|
||||
agent_name: 에이전트 이름
|
||||
agent_name: Name of the agent
|
||||
|
||||
Returns:
|
||||
~/.deepagents/{agent_name} 경로
|
||||
Path to ~/.deepagents/{agent_name}
|
||||
"""
|
||||
if not self._is_valid_agent_name(agent_name):
|
||||
msg = (
|
||||
@@ -312,10 +311,10 @@ class Settings:
|
||||
return agent_dir
|
||||
|
||||
def ensure_project_deepagents_dir(self) -> Path | None:
|
||||
"""프로젝트 .deepagents 디렉터리가 존재하는지 확인하고 경로를 반환합니다.
|
||||
"""Ensure the project .deepagents directory exists and return its path.
|
||||
|
||||
Returns:
|
||||
프로젝트 .deepagents 디렉터리 경로, 프로젝트 내에 없는 경우 None
|
||||
Path to project .deepagents directory, or None if not in a project
|
||||
"""
|
||||
if not self.project_root:
|
||||
return None
|
||||
@@ -325,44 +324,44 @@ class Settings:
|
||||
return project_deepagents_dir
|
||||
|
||||
def get_user_skills_dir(self, agent_name: str) -> Path:
|
||||
"""특정 에이전트에 대한 사용자 수준 기술(skills) 디렉터리 경로를 가져옵니다.
|
||||
"""Get user-level skills directory path for a specific agent.
|
||||
|
||||
Args:
|
||||
agent_name: 에이전트 이름
|
||||
agent_name: Name of the agent
|
||||
|
||||
Returns:
|
||||
~/.deepagents/{agent_name}/skills/ 경로
|
||||
Path to ~/.deepagents/{agent_name}/skills/
|
||||
"""
|
||||
return self.get_agent_dir(agent_name) / "skills"
|
||||
|
||||
def ensure_user_skills_dir(self, agent_name: str) -> Path:
|
||||
"""사용자 수준 기술(skills) 디렉터리가 존재하는지 확인하고 경로를 반환합니다.
|
||||
"""Ensure user-level skills directory exists and return its path.
|
||||
|
||||
Args:
|
||||
agent_name: 에이전트 이름
|
||||
agent_name: Name of the agent
|
||||
|
||||
Returns:
|
||||
~/.deepagents/{agent_name}/skills/ 경로
|
||||
Path to ~/.deepagents/{agent_name}/skills/
|
||||
"""
|
||||
skills_dir = self.get_user_skills_dir(agent_name)
|
||||
skills_dir.mkdir(parents=True, exist_ok=True)
|
||||
return skills_dir
|
||||
|
||||
def get_project_skills_dir(self) -> Path | None:
|
||||
"""프로젝트 수준 기술(skills) 디렉터리 경로를 가져옵니다.
|
||||
"""Get project-level skills directory path.
|
||||
|
||||
Returns:
|
||||
{project_root}/.deepagents/skills/ 경로, 프로젝트 내에 없는 경우 None
|
||||
Path to {project_root}/.deepagents/skills/, or None if not in a project
|
||||
"""
|
||||
if not self.project_root:
|
||||
return None
|
||||
return self.project_root / ".deepagents" / "skills"
|
||||
|
||||
def ensure_project_skills_dir(self) -> Path | None:
|
||||
"""프로젝트 수준 기술(skills) 디렉터리가 존재하는지 확인하고 경로를 반환합니다.
|
||||
"""Ensure project-level skills directory exists and return its path.
|
||||
|
||||
Returns:
|
||||
{project_root}/.deepagents/skills/ 경로, 프로젝트 내에 없는 경우 None
|
||||
Path to {project_root}/.deepagents/skills/, or None if not in a project
|
||||
"""
|
||||
if not self.project_root:
|
||||
return None
|
||||
@@ -376,7 +375,7 @@ settings = Settings.from_environment()
|
||||
|
||||
|
||||
class SessionState:
|
||||
"""변경 가능한 세션 상태를 유지합니다 (자동 승인 모드 등)."""
|
||||
"""Holds mutable session state (auto-approve mode, etc)."""
|
||||
|
||||
def __init__(self, auto_approve: bool = False, no_splash: bool = False) -> None:
|
||||
self.auto_approve = auto_approve
|
||||
@@ -386,29 +385,29 @@ class SessionState:
|
||||
self.thread_id = str(uuid.uuid4())
|
||||
|
||||
def toggle_auto_approve(self) -> bool:
|
||||
"""자동 승인을 토글하고 새로운 상태를 반환합니다."""
|
||||
"""Toggle auto-approve and return new state."""
|
||||
self.auto_approve = not self.auto_approve
|
||||
return self.auto_approve
|
||||
|
||||
|
||||
def get_default_coding_instructions() -> str:
|
||||
"""기본 코딩 에이전트 지침을 가져옵니다.
|
||||
"""Get the default coding agent instructions.
|
||||
|
||||
이는 에이전트가 수정할 수 없는 불변의 기본 지침입니다.
|
||||
장기 메모리(agent.md)는 미들웨어에서 별도로 처리합니다.
|
||||
These are the immutable base instructions that cannot be modified by the agent.
|
||||
Long-term memory (AGENTS.md) is handled separately by the middleware.
|
||||
"""
|
||||
default_prompt_path = Path(__file__).parent / "default_agent_prompt.md"
|
||||
return default_prompt_path.read_text()
|
||||
|
||||
|
||||
def _detect_provider(model_name: str) -> str | None:
|
||||
"""모델 이름에서 공급자를 자동 감지합니다.
|
||||
"""Auto-detect provider from model name.
|
||||
|
||||
Args:
|
||||
model_name: 공급자를 감지할 모델 이름
|
||||
model_name: Model name to detect provider from
|
||||
|
||||
Returns:
|
||||
공급자 이름(openai, anthropic, google) 또는 감지할 수 없는 경우 None
|
||||
Provider name (openai, anthropic, google) or None if can't detect
|
||||
"""
|
||||
model_lower = model_name.lower()
|
||||
if any(x in model_lower for x in ["gpt", "o1", "o3"]):
|
||||
@@ -421,18 +420,18 @@ def _detect_provider(model_name: str) -> str | None:
|
||||
|
||||
|
||||
def create_model(model_name_override: str | None = None) -> BaseChatModel:
|
||||
"""사용 가능한 API 키를 기반으로 적절한 모델을 생성합니다.
|
||||
"""Create the appropriate model based on available API keys.
|
||||
|
||||
전역 설정 인스턴스를 사용하여 생성할 모델을 결정합니다.
|
||||
Uses the global settings instance to determine which model to create.
|
||||
|
||||
Args:
|
||||
model_name_override: 환경 변수 대신 사용할 선택적 모델 이름
|
||||
model_name_override: Optional model name to use instead of environment variable
|
||||
|
||||
Returns:
|
||||
ChatModel 인스턴스 (OpenAI, Anthropic, 또는 Google)
|
||||
ChatModel instance (OpenAI, Anthropic, or Google)
|
||||
|
||||
Raises:
|
||||
API 키가 구성되지 않았거나 모델 공급자를 결정할 수 없는 경우 SystemExit
|
||||
SystemExit if no API key is configured or model provider can't be determined
|
||||
"""
|
||||
# Determine provider and model
|
||||
if model_name_override:
|
||||
@@ -440,9 +439,9 @@ def create_model(model_name_override: str | None = None) -> BaseChatModel:
|
||||
provider = _detect_provider(model_name_override)
|
||||
if not provider:
|
||||
console.print(
|
||||
f"[bold red]오류:[/bold red] 모델 이름에서 공급자를 감지할 수 없습니다: {model_name_override}"
|
||||
f"[bold red]Error:[/bold red] Could not detect provider from model name: {model_name_override}"
|
||||
)
|
||||
console.print("\n지원되는 모델 이름 패턴:")
|
||||
console.print("\nSupported model name patterns:")
|
||||
console.print(" - OpenAI: gpt-*, o1-*, o3-*")
|
||||
console.print(" - Anthropic: claude-*")
|
||||
console.print(" - Google: gemini-*")
|
||||
@@ -450,15 +449,19 @@ def create_model(model_name_override: str | None = None) -> BaseChatModel:
|
||||
|
||||
# Check if API key for detected provider is available
|
||||
if provider == "openai" and not settings.has_openai:
|
||||
console.print(f"[bold red]오류:[/bold red] 모델 '{model_name_override}'은(는) OPENAI_API_KEY가 필요합니다")
|
||||
console.print(
|
||||
f"[bold red]Error:[/bold red] Model '{model_name_override}' requires OPENAI_API_KEY"
|
||||
)
|
||||
sys.exit(1)
|
||||
elif provider == "anthropic" and not settings.has_anthropic:
|
||||
console.print(
|
||||
f"[bold red]오류:[/bold red] 모델 '{model_name_override}'은(는) ANTHROPIC_API_KEY가 필요합니다"
|
||||
f"[bold red]Error:[/bold red] Model '{model_name_override}' requires ANTHROPIC_API_KEY"
|
||||
)
|
||||
sys.exit(1)
|
||||
elif provider == "google" and not settings.has_google:
|
||||
console.print(f"[bold red]오류:[/bold red] 모델 '{model_name_override}'은(는) GOOGLE_API_KEY가 필요합니다")
|
||||
console.print(
|
||||
f"[bold red]Error:[/bold red] Model '{model_name_override}' requires GOOGLE_API_KEY"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
model_name = model_name_override
|
||||
@@ -473,14 +476,14 @@ def create_model(model_name_override: str | None = None) -> BaseChatModel:
|
||||
provider = "google"
|
||||
model_name = os.environ.get("GOOGLE_MODEL", "gemini-3-pro-preview")
|
||||
else:
|
||||
console.print("[bold red]오류:[/bold red] API 키가 구성되지 않았습니다.")
|
||||
console.print("\n다음 환경 변수 중 하나를 설정하십시오:")
|
||||
console.print(" - OPENAI_API_KEY (OpenAI 모델용, 예: gpt-5-mini)")
|
||||
console.print(" - ANTHROPIC_API_KEY (Claude 모델용)")
|
||||
console.print(" - GOOGLE_API_KEY (Google Gemini 모델용)")
|
||||
console.print("\n예시:")
|
||||
console.print("[bold red]Error:[/bold red] No API key configured.")
|
||||
console.print("\nPlease set one of the following environment variables:")
|
||||
console.print(" - OPENAI_API_KEY (for OpenAI models like gpt-5-mini)")
|
||||
console.print(" - ANTHROPIC_API_KEY (for Claude models)")
|
||||
console.print(" - GOOGLE_API_KEY (for Google Gemini models)")
|
||||
console.print("\nExample:")
|
||||
console.print(" export OPENAI_API_KEY=your_api_key_here")
|
||||
console.print("\n또는 .env 파일에 추가하십시오.")
|
||||
console.print("\nOr add it to your .env file.")
|
||||
sys.exit(1)
|
||||
|
||||
# Store model info in settings for display
|
||||
|
||||
@@ -1,98 +1,65 @@
|
||||
You are an AI assistant that helps users with various tasks such as coding, research, and analysis.
|
||||
You are an AI assistant that helps users with various tasks including coding, research, and analysis.
|
||||
|
||||
# Core Role
|
||||
Your core role and behavior can be updated based on user feedback and instructions. If the user instructs you on how to behave or about your role, immediately update this memory file to reflect those instructions.
|
||||
# Core Behavior
|
||||
|
||||
## Memory-First Protocol
|
||||
You have access to a persistent memory system. Always follow this protocol:
|
||||
|
||||
**At the start of a session:**
|
||||
- Check `ls /memories/` to see what knowledge is stored.
|
||||
- If a specific topic is mentioned in the role description, check related guides in `/memories/`.
|
||||
|
||||
**Before answering a question:**
|
||||
- When asked "What do you know about X?" or "How do I do Y?" → Check `ls /memories/` first.
|
||||
- If a relevant memory file exists → Read it and answer based on the saved knowledge.
|
||||
- Prioritize stored knowledge over general knowledge.
|
||||
|
||||
**When learning new information:**
|
||||
- If the user teaches you something or asks you to remember something → Save it to `/memories/[topic].md`.
|
||||
- Use descriptive filenames: Use `/memories/deep-agents-guide.md` instead of `/memories/notes.md`.
|
||||
- After saving, read specific content again to verify.
|
||||
|
||||
**Important:** Your memory persists between sessions. Information stored in `/memories/` is more reliable than general knowledge for topics you have specifically learned.
|
||||
|
||||
# Tone and Style
|
||||
Be concise and direct. Answer within 4 lines unless the user asks for details.
|
||||
Stop after finishing file operations - Do not explain what you did unless asked.
|
||||
Be concise and direct. Answer in fewer than 4 lines unless the user asks for detail.
|
||||
After working on a file, just stop - don't explain what you did unless asked.
|
||||
Avoid unnecessary introductions or conclusions.
|
||||
|
||||
When executing unimportant bash commands, briefly explain what you are doing.
|
||||
When you run non-trivial bash commands, briefly explain what they do.
|
||||
|
||||
## Proactiveness
|
||||
Take action when requested, but do not surprise the user with unrequested actions.
|
||||
If asked about an approach, answer first before taking action.
|
||||
Take action when asked, but don't surprise users with unrequested actions.
|
||||
If asked how to approach something, answer first before taking action.
|
||||
|
||||
## Following Conventions
|
||||
- Check existing code before assuming the availability of libraries and frameworks.
|
||||
- Mimic existing code style, naming conventions, and patterns.
|
||||
- Do not add comments unless requested.
|
||||
- Check existing code for libraries and frameworks before assuming availability
|
||||
- Mimic existing code style, naming conventions, and patterns
|
||||
- Never add comments unless asked
|
||||
|
||||
## Task Management
|
||||
Use `write_todos` for complex multi-step tasks (3 or more steps). Mark tasks as `in_progress` before starting, and `completed` immediately after finishing.
|
||||
Perform simple 1-2 step tasks immediately without todos.
|
||||
Use write_todos for complex multi-step tasks (3+ steps). Mark tasks in_progress before starting, completed immediately after finishing.
|
||||
For simple 1-2 step tasks, just do them directly without todos.
|
||||
|
||||
## File Reading Best Practices
|
||||
|
||||
**Important**: When navigating the codebase or reading multiple files, always use pagination to prevent context overflow.
|
||||
When exploring codebases or reading multiple files, use pagination to prevent context overflow.
|
||||
|
||||
**Codebase Navigation Patterns:**
|
||||
1. First Scan: `read_file(path, limit=100)` - Check file structure and key sections
|
||||
2. Targeted Reading: `read_file(path, offset=100, limit=200)` - Read specific sections if needed
|
||||
3. Full Reading: Use `read_file(path)` without limits only when needed for editing
|
||||
**Pattern for codebase exploration:**
|
||||
1. First scan: `read_file(path, limit=100)` - See file structure and key sections
|
||||
2. Targeted read: `read_file(path, offset=100, limit=200)` - Read specific sections if needed
|
||||
3. Full read: Only use `read_file(path)` without limit when necessary for editing
|
||||
|
||||
**When to use pagination:**
|
||||
- Reading any file exceeding 500 lines
|
||||
- Exploring unfamiliar codebases (Always start with limit=100)
|
||||
- Reading multiple files in succession
|
||||
- All research or investigation tasks
|
||||
**When to paginate:**
|
||||
- Reading any file >500 lines
|
||||
- Exploring unfamiliar codebases (always start with limit=100)
|
||||
- Reading multiple files in sequence
|
||||
|
||||
**When full reading is allowed:**
|
||||
- Small files (under 500 lines)
|
||||
- Files required to be edited immediately after reading
|
||||
- After verifying file size with a first scan
|
||||
**When full read is OK:**
|
||||
- Small files (<500 lines)
|
||||
- Files you need to edit immediately after reading
|
||||
|
||||
**Workflow Example:**
|
||||
```
|
||||
Bad: read_file(/src/large_module.py) # Fills context with 2000+ lines of code
|
||||
Good: read_file(/src/large_module.py, limit=100) # Scan structure first
|
||||
read_file(/src/large_module.py, offset=100, limit=100) # Read relevant section
|
||||
```
|
||||
|
||||
## Working with Subagents (Task Tools)
|
||||
## Working with Subagents (task tool)
|
||||
When delegating to subagents:
|
||||
- **Use Filesystem for Large I/O**: If input instructions are large (500+ words) or expected output is large, communicate via files.
|
||||
- Write input context/instructions to a file, and instruct the subagent to read it.
|
||||
- Ask the subagent to write output to a file, and read it after the subagent returns.
|
||||
- This prevents token bloat in both directions and keeps context manageable.
|
||||
- **Parallelize Independent Tasks**: When tasks are independent, create parallel subagents to work simultaneously.
|
||||
- **Clear Specifications**: Precisely inform the subagent of the required format/structure in their response or output file.
|
||||
- **Main Agent Synthesis**: Once subagents collect/execute, the main agent integrates results into the final output.
|
||||
- **Use filesystem for large I/O**: If input/output is large (>500 words), communicate via files
|
||||
- **Parallelize independent work**: Spawn parallel subagents for independent tasks
|
||||
- **Clear specifications**: Tell subagent exactly what format/structure you need
|
||||
- **Main agent synthesizes**: Subagents gather/execute, main agent integrates results
|
||||
|
||||
## Tools
|
||||
|
||||
### execute_bash
|
||||
Executes shell commands. Always allow path with spaces to be quoted.
|
||||
bash commands are executed in the current working directory.
|
||||
Example: `pytest /foo/bar/tests` (Good), `cd /foo/bar && pytest tests` (Bad)
|
||||
### shell
|
||||
Execute shell commands. Always quote paths with spaces.
|
||||
The bash command will be run from your current working directory.
|
||||
Examples: `pytest /foo/bar/tests` (good), `cd /foo/bar && pytest tests` (bad)
|
||||
|
||||
### File Tools
|
||||
- read_file: Read file content (use absolute path)
|
||||
- edit_file: Exact string replacement in file (must read first, provide unique old_string)
|
||||
- write_file: Create or overwrite file
|
||||
- read_file: Read file contents (use absolute paths)
|
||||
- edit_file: Replace exact strings in files (must read first, provide unique old_string)
|
||||
- write_file: Create or overwrite files
|
||||
- ls: List directory contents
|
||||
- glob: Find files by pattern (e.g., "**/*.py")
|
||||
- grep: Search file content
|
||||
- grep: Search file contents
|
||||
|
||||
Always use absolute paths starting with /.
|
||||
|
||||
@@ -100,12 +67,12 @@ Always use absolute paths starting with /.
|
||||
Search for documentation, error solutions, and code examples.
|
||||
|
||||
### http_request
|
||||
Sends HTTP requests to an API (GET, POST, etc.).
|
||||
Make HTTP requests to APIs (GET, POST, etc.).
|
||||
|
||||
## Code References
|
||||
When referencing code, use the following format: `file_path:line_number`
|
||||
When referencing code, use format: `file_path:line_number`
|
||||
|
||||
## Documentation
|
||||
- Do not create excessive markdown summary/documentation files after completing tasks.
|
||||
- Focus on the task itself, not documenting what you did.
|
||||
- Write documentation only when explicitly requested.
|
||||
- Do NOT create excessive markdown summary/documentation files after completing work
|
||||
- Focus on the work itself, not documenting what you did
|
||||
- Only create documentation when explicitly requested
|
||||
|
||||
@@ -1,672 +0,0 @@
|
||||
"""CLI를 위한 작업 실행 및 스트리밍 로직."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import sys
|
||||
import termios
|
||||
import tty
|
||||
|
||||
from langchain.agents.middleware.human_in_the_loop import (
|
||||
ActionRequest,
|
||||
ApproveDecision,
|
||||
Decision,
|
||||
HITLRequest,
|
||||
HITLResponse,
|
||||
RejectDecision,
|
||||
)
|
||||
from langchain_core.messages import HumanMessage, ToolMessage
|
||||
from langgraph.types import Command, Interrupt
|
||||
from pydantic import TypeAdapter, ValidationError
|
||||
from rich import box
|
||||
from rich.markdown import Markdown
|
||||
from rich.panel import Panel
|
||||
|
||||
from deepagents_cli.config import COLORS, console
|
||||
from deepagents_cli.file_ops import FileOpTracker, build_approval_preview
|
||||
from deepagents_cli.image_utils import create_multimodal_content
|
||||
from deepagents_cli.input import ImageTracker, parse_file_mentions
|
||||
from deepagents_cli.ui import (
|
||||
TokenTracker,
|
||||
format_tool_display,
|
||||
format_tool_message_content,
|
||||
render_diff_block,
|
||||
render_file_operation,
|
||||
render_todo_list,
|
||||
)
|
||||
|
||||
_HITL_REQUEST_ADAPTER = TypeAdapter(HITLRequest)
|
||||
|
||||
|
||||
def prompt_for_tool_approval(
|
||||
action_request: ActionRequest,
|
||||
assistant_id: str | None,
|
||||
) -> Decision | dict:
|
||||
"""방향키 탐색을 사용하여 도구 작업을 승인/거부하도록 사용자에게 묻습니다.
|
||||
|
||||
Returns:
|
||||
Decision (ApproveDecision 또는 RejectDecision) 또는
|
||||
자동 승인 모드로 전환하기 위한 {"type": "auto_approve_all"} dict
|
||||
"""
|
||||
description = action_request.get("description", "No description available")
|
||||
name = action_request["name"]
|
||||
args = action_request["args"]
|
||||
preview = build_approval_preview(name, args, assistant_id) if name else None
|
||||
|
||||
body_lines = []
|
||||
if preview:
|
||||
body_lines.append(f"[bold]{preview.title}[/bold]")
|
||||
body_lines.extend(preview.details)
|
||||
if preview.error:
|
||||
body_lines.append(f"[red]{preview.error}[/red]")
|
||||
else:
|
||||
body_lines.append(description)
|
||||
|
||||
# Display action info first
|
||||
console.print(
|
||||
Panel(
|
||||
"[bold yellow]⚠️ 도구 작업 승인 필요[/bold yellow]\n\n" + "\n".join(body_lines),
|
||||
border_style="yellow",
|
||||
box=box.ROUNDED,
|
||||
padding=(0, 1),
|
||||
)
|
||||
)
|
||||
if preview and preview.diff and not preview.error:
|
||||
console.print()
|
||||
render_diff_block(preview.diff, preview.diff_title or preview.title)
|
||||
|
||||
options = ["approve", "reject", "auto-accept all going forward"]
|
||||
selected = 0 # Start with approve selected
|
||||
|
||||
try:
|
||||
fd = sys.stdin.fileno()
|
||||
old_settings = termios.tcgetattr(fd)
|
||||
|
||||
try:
|
||||
tty.setraw(fd)
|
||||
# Hide cursor during menu interaction
|
||||
sys.stdout.write("\033[?25l")
|
||||
sys.stdout.flush()
|
||||
|
||||
# Initial render flag
|
||||
first_render = True
|
||||
|
||||
while True:
|
||||
if not first_render:
|
||||
# Move cursor back to start of menu (up 3 lines, then to start of line)
|
||||
sys.stdout.write("\033[3A\r")
|
||||
|
||||
first_render = False
|
||||
|
||||
# Display options vertically with ANSI color codes
|
||||
for i, option in enumerate(options):
|
||||
sys.stdout.write("\r\033[K") # Clear line from cursor to end
|
||||
|
||||
if i == selected:
|
||||
if option == "approve":
|
||||
# Green bold with filled checkbox
|
||||
sys.stdout.write("\033[1;32m☑ 승인 (Approve)\033[0m\n")
|
||||
elif option == "reject":
|
||||
# Red bold with filled checkbox
|
||||
sys.stdout.write("\033[1;31m☑ 거부 (Reject)\033[0m\n")
|
||||
else:
|
||||
# Blue bold with filled checkbox for auto-accept
|
||||
sys.stdout.write("\033[1;34m☑ 이후 모두 자동 승인 (Auto-accept all)\033[0m\n")
|
||||
elif option == "approve":
|
||||
# Dim with empty checkbox
|
||||
sys.stdout.write("\033[2m☐ 승인 (Approve)\033[0m\n")
|
||||
elif option == "reject":
|
||||
# Dim with empty checkbox
|
||||
sys.stdout.write("\033[2m☐ 거부 (Reject)\033[0m\n")
|
||||
else:
|
||||
# Dim with empty checkbox
|
||||
sys.stdout.write("\033[2m☐ 이후 모두 자동 승인 (Auto-accept all)\033[0m\n")
|
||||
|
||||
sys.stdout.flush()
|
||||
|
||||
# Read key
|
||||
char = sys.stdin.read(1)
|
||||
|
||||
if char == "\x1b": # ESC sequence (arrow keys)
|
||||
next1 = sys.stdin.read(1)
|
||||
next2 = sys.stdin.read(1)
|
||||
if next1 == "[":
|
||||
if next2 == "B": # Down arrow
|
||||
selected = (selected + 1) % len(options)
|
||||
elif next2 == "A": # Up arrow
|
||||
selected = (selected - 1) % len(options)
|
||||
elif char in {"\r", "\n"}: # Enter
|
||||
sys.stdout.write("\r\n") # Move to start of line and add newline
|
||||
break
|
||||
elif char == "\x03": # Ctrl+C
|
||||
sys.stdout.write("\r\n") # Move to start of line and add newline
|
||||
raise KeyboardInterrupt
|
||||
elif char.lower() == "a":
|
||||
selected = 0
|
||||
sys.stdout.write("\r\n") # Move to start of line and add newline
|
||||
break
|
||||
elif char.lower() == "r":
|
||||
selected = 1
|
||||
sys.stdout.write("\r\n") # Move to start of line and add newline
|
||||
break
|
||||
|
||||
finally:
|
||||
# Show cursor again
|
||||
sys.stdout.write("\033[?25h")
|
||||
sys.stdout.flush()
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
|
||||
|
||||
except (termios.error, AttributeError):
|
||||
# Fallback for non-Unix systems
|
||||
console.print(" ☐ (A)승인 (기본값)")
|
||||
console.print(" ☐ (R)거부")
|
||||
console.print(" ☐ (Auto)이후 모두 자동 승인")
|
||||
choice = input("\n선택 (A/R/Auto, 기본값=Approve): ").strip().lower()
|
||||
if choice in {"r", "reject"}:
|
||||
selected = 1
|
||||
elif choice in {"auto", "auto-accept"}:
|
||||
selected = 2
|
||||
else:
|
||||
selected = 0
|
||||
|
||||
# Return decision based on selection
|
||||
if selected == 0:
|
||||
return ApproveDecision(type="approve")
|
||||
if selected == 1:
|
||||
return RejectDecision(type="reject", message="User rejected the command")
|
||||
# Return special marker for auto-approve mode
|
||||
return {"type": "auto_approve_all"}
|
||||
|
||||
|
||||
async def execute_task(
|
||||
user_input: str,
|
||||
agent,
|
||||
assistant_id: str | None,
|
||||
session_state,
|
||||
token_tracker: TokenTracker | None = None,
|
||||
backend=None,
|
||||
image_tracker: ImageTracker | None = None,
|
||||
) -> None:
|
||||
"""모든 작업을 AI 에이전트에게 직접 전달하여 실행합니다."""
|
||||
# Parse file mentions and inject content if any
|
||||
prompt_text, mentioned_files = parse_file_mentions(user_input)
|
||||
|
||||
if mentioned_files:
|
||||
context_parts = [prompt_text, "\n\n## 참조된 파일 (Referenced Files)\n"]
|
||||
for file_path in mentioned_files:
|
||||
try:
|
||||
content = file_path.read_text()
|
||||
# Limit file content to reasonable size
|
||||
if len(content) > 50000:
|
||||
content = content[:50000] + "\n... (파일 잘림)"
|
||||
context_parts.append(f"\n### {file_path.name}\nPath: `{file_path}`\n```\n{content}\n```")
|
||||
except Exception as e:
|
||||
context_parts.append(f"\n### {file_path.name}\n[파일 읽기 오류: {e}]")
|
||||
|
||||
final_input = "\n".join(context_parts)
|
||||
else:
|
||||
final_input = prompt_text
|
||||
|
||||
# Include images in the message content
|
||||
images_to_send = []
|
||||
if image_tracker:
|
||||
images_to_send = image_tracker.get_images()
|
||||
if images_to_send:
|
||||
message_content = create_multimodal_content(final_input, images_to_send)
|
||||
else:
|
||||
message_content = final_input
|
||||
|
||||
config = {
|
||||
"configurable": {"thread_id": session_state.thread_id},
|
||||
"metadata": {"assistant_id": assistant_id} if assistant_id else {},
|
||||
}
|
||||
|
||||
has_responded = False
|
||||
captured_input_tokens = 0
|
||||
captured_output_tokens = 0
|
||||
current_todos = None # Track current todo list state
|
||||
|
||||
status = console.status(f"[bold {COLORS['thinking']}]에이전트가 생각 중...", spinner="dots")
|
||||
status.start()
|
||||
spinner_active = True
|
||||
|
||||
tool_icons = {
|
||||
"read_file": "📖",
|
||||
"write_file": "✏️",
|
||||
"edit_file": "✂️",
|
||||
"ls": "📁",
|
||||
"glob": "🔍",
|
||||
"grep": "🔎",
|
||||
"shell": "⚡",
|
||||
"execute": "🔧",
|
||||
"web_search": "🌐",
|
||||
"http_request": "🌍",
|
||||
"task": "🤖",
|
||||
"write_todos": "📋",
|
||||
}
|
||||
|
||||
file_op_tracker = FileOpTracker(assistant_id=assistant_id, backend=backend)
|
||||
|
||||
# Track which tool calls we've displayed to avoid duplicates
|
||||
displayed_tool_ids = set()
|
||||
# Buffer partial tool-call chunks keyed by streaming index
|
||||
tool_call_buffers: dict[str | int, dict] = {}
|
||||
# Buffer assistant text so we can render complete markdown segments
|
||||
pending_text = ""
|
||||
|
||||
def flush_text_buffer(*, final: bool = False) -> None:
|
||||
"""Flush accumulated assistant text as rendered markdown when appropriate."""
|
||||
nonlocal pending_text, spinner_active, has_responded
|
||||
if not final or not pending_text.strip():
|
||||
return
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
spinner_active = False
|
||||
if not has_responded:
|
||||
console.print("●", style=COLORS["agent"], markup=False, end=" ")
|
||||
has_responded = True
|
||||
markdown = Markdown(pending_text.rstrip())
|
||||
console.print(markdown, style=COLORS["agent"])
|
||||
pending_text = ""
|
||||
|
||||
# Clear images from tracker after creating the message
|
||||
# (they've been encoded into the message content)
|
||||
if image_tracker:
|
||||
image_tracker.clear()
|
||||
|
||||
# Stream input - may need to loop if there are interrupts
|
||||
stream_input = {"messages": [{"role": "user", "content": message_content}]}
|
||||
|
||||
try:
|
||||
while True:
|
||||
interrupt_occurred = False
|
||||
hitl_response: dict[str, HITLResponse] = {}
|
||||
suppress_resumed_output = False
|
||||
# Track all pending interrupts: {interrupt_id: request_data}
|
||||
pending_interrupts: dict[str, HITLRequest] = {}
|
||||
|
||||
async for chunk in agent.astream(
|
||||
stream_input,
|
||||
stream_mode=["messages", "updates"], # Dual-mode for HITL support
|
||||
subgraphs=True,
|
||||
config=config,
|
||||
durability="exit",
|
||||
):
|
||||
# Unpack chunk - with subgraphs=True and dual-mode, it's (namespace, stream_mode, data)
|
||||
if not isinstance(chunk, tuple) or len(chunk) != 3:
|
||||
continue
|
||||
|
||||
_namespace, current_stream_mode, data = chunk
|
||||
|
||||
# Handle UPDATES stream - for interrupts and todos
|
||||
if current_stream_mode == "updates":
|
||||
if not isinstance(data, dict):
|
||||
continue
|
||||
|
||||
# Check for interrupts - collect ALL pending interrupts
|
||||
if "__interrupt__" in data:
|
||||
interrupts: list[Interrupt] = data["__interrupt__"]
|
||||
if interrupts:
|
||||
for interrupt_obj in interrupts:
|
||||
# Interrupt has required fields: value (HITLRequest) and id (str)
|
||||
# Validate the HITLRequest using TypeAdapter
|
||||
try:
|
||||
validated_request = _HITL_REQUEST_ADAPTER.validate_python(interrupt_obj.value)
|
||||
pending_interrupts[interrupt_obj.id] = validated_request
|
||||
interrupt_occurred = True
|
||||
except ValidationError as e:
|
||||
console.print(
|
||||
f"[yellow]경고: 유효하지 않은 HITL 요청 데이터: {e}[/yellow]",
|
||||
style="dim",
|
||||
)
|
||||
raise
|
||||
|
||||
# Extract chunk_data from updates for todo checking
|
||||
chunk_data = next(iter(data.values())) if data else None
|
||||
if chunk_data and isinstance(chunk_data, dict):
|
||||
# Check for todo updates
|
||||
if "todos" in chunk_data:
|
||||
new_todos = chunk_data["todos"]
|
||||
if new_todos != current_todos:
|
||||
current_todos = new_todos
|
||||
# Stop spinner before rendering todos
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
spinner_active = False
|
||||
console.print()
|
||||
render_todo_list(new_todos)
|
||||
console.print()
|
||||
|
||||
# Handle MESSAGES stream - for content and tool calls
|
||||
elif current_stream_mode == "messages":
|
||||
# Messages stream returns (message, metadata) tuples
|
||||
if not isinstance(data, tuple) or len(data) != 2:
|
||||
continue
|
||||
|
||||
message, _metadata = data
|
||||
|
||||
if isinstance(message, HumanMessage):
|
||||
content = message.text
|
||||
if content:
|
||||
flush_text_buffer(final=True)
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
spinner_active = False
|
||||
if not has_responded:
|
||||
console.print("●", style=COLORS["agent"], markup=False, end=" ")
|
||||
has_responded = True
|
||||
markdown = Markdown(content)
|
||||
console.print(markdown, style=COLORS["agent"])
|
||||
console.print()
|
||||
continue
|
||||
|
||||
if isinstance(message, ToolMessage):
|
||||
# Tool results are sent to the agent, not displayed to users
|
||||
# Exception: show shell command errors to help with debugging
|
||||
tool_name = getattr(message, "name", "")
|
||||
tool_status = getattr(message, "status", "success")
|
||||
tool_content = format_tool_message_content(message.content)
|
||||
record = file_op_tracker.complete_with_message(message)
|
||||
|
||||
# Reset spinner message after tool completes
|
||||
if spinner_active:
|
||||
status.update(f"[bold {COLORS['thinking']}]에이전트가 생각 중...")
|
||||
|
||||
if tool_name == "shell" and tool_status != "success":
|
||||
flush_text_buffer(final=True)
|
||||
if tool_content:
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
spinner_active = False
|
||||
console.print()
|
||||
console.print(tool_content, style="red", markup=False)
|
||||
console.print()
|
||||
elif tool_content and isinstance(tool_content, str):
|
||||
stripped = tool_content.lstrip()
|
||||
if stripped.lower().startswith("error"):
|
||||
flush_text_buffer(final=True)
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
spinner_active = False
|
||||
console.print()
|
||||
console.print(tool_content, style="red", markup=False)
|
||||
console.print()
|
||||
|
||||
if record:
|
||||
flush_text_buffer(final=True)
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
spinner_active = False
|
||||
console.print()
|
||||
render_file_operation(record)
|
||||
console.print()
|
||||
if not spinner_active:
|
||||
status.start()
|
||||
spinner_active = True
|
||||
|
||||
# For all other tools (web_search, http_request, etc.),
|
||||
# results are hidden from user - agent will process and respond
|
||||
continue
|
||||
|
||||
# Check if this is an AIMessageChunk
|
||||
if not hasattr(message, "content_blocks"):
|
||||
# Fallback for messages without content_blocks
|
||||
continue
|
||||
|
||||
# Extract token usage if available
|
||||
if token_tracker and hasattr(message, "usage_metadata"):
|
||||
usage = message.usage_metadata
|
||||
if usage:
|
||||
input_toks = usage.get("input_tokens", 0)
|
||||
output_toks = usage.get("output_tokens", 0)
|
||||
if input_toks or output_toks:
|
||||
captured_input_tokens = max(captured_input_tokens, input_toks)
|
||||
captured_output_tokens = max(captured_output_tokens, output_toks)
|
||||
|
||||
# Process content blocks (this is the key fix!)
|
||||
for block in message.content_blocks:
|
||||
block_type = block.get("type")
|
||||
|
||||
# Handle text blocks
|
||||
if block_type == "text":
|
||||
text = block.get("text", "")
|
||||
if text:
|
||||
pending_text += text
|
||||
|
||||
# Handle reasoning blocks
|
||||
elif block_type == "reasoning":
|
||||
flush_text_buffer(final=True)
|
||||
reasoning = block.get("reasoning", "")
|
||||
if reasoning and spinner_active:
|
||||
status.stop()
|
||||
spinner_active = False
|
||||
# Could display reasoning differently if desired
|
||||
# For now, skip it or handle minimally
|
||||
|
||||
# Handle tool call chunks
|
||||
# Some models (OpenAI, Anthropic) stream tool_call_chunks
|
||||
# Others (Gemini) don't stream them and just return the full tool_call
|
||||
elif block_type in ("tool_call_chunk", "tool_call"):
|
||||
chunk_name = block.get("name")
|
||||
chunk_args = block.get("args")
|
||||
chunk_id = block.get("id")
|
||||
chunk_index = block.get("index")
|
||||
|
||||
# Use index as stable buffer key; fall back to id if needed
|
||||
buffer_key: str | int
|
||||
if chunk_index is not None:
|
||||
buffer_key = chunk_index
|
||||
elif chunk_id is not None:
|
||||
buffer_key = chunk_id
|
||||
else:
|
||||
buffer_key = f"unknown-{len(tool_call_buffers)}"
|
||||
|
||||
buffer = tool_call_buffers.setdefault(
|
||||
buffer_key,
|
||||
{"name": None, "id": None, "args": None, "args_parts": []},
|
||||
)
|
||||
|
||||
if chunk_name:
|
||||
buffer["name"] = chunk_name
|
||||
if chunk_id:
|
||||
buffer["id"] = chunk_id
|
||||
|
||||
if isinstance(chunk_args, dict):
|
||||
buffer["args"] = chunk_args
|
||||
buffer["args_parts"] = []
|
||||
elif isinstance(chunk_args, str):
|
||||
if chunk_args:
|
||||
parts: list[str] = buffer.setdefault("args_parts", [])
|
||||
if not parts or chunk_args != parts[-1]:
|
||||
parts.append(chunk_args)
|
||||
buffer["args"] = "".join(parts)
|
||||
elif chunk_args is not None:
|
||||
buffer["args"] = chunk_args
|
||||
|
||||
buffer_name = buffer.get("name")
|
||||
buffer_id = buffer.get("id")
|
||||
if buffer_name is None:
|
||||
continue
|
||||
|
||||
parsed_args = buffer.get("args")
|
||||
if isinstance(parsed_args, str):
|
||||
if not parsed_args:
|
||||
continue
|
||||
try:
|
||||
parsed_args = json.loads(parsed_args)
|
||||
except json.JSONDecodeError:
|
||||
# Wait for more chunks to form valid JSON
|
||||
continue
|
||||
elif parsed_args is None:
|
||||
continue
|
||||
|
||||
# Ensure args are in dict form for formatter
|
||||
if not isinstance(parsed_args, dict):
|
||||
parsed_args = {"value": parsed_args}
|
||||
|
||||
flush_text_buffer(final=True)
|
||||
if buffer_id is not None:
|
||||
if buffer_id not in displayed_tool_ids:
|
||||
displayed_tool_ids.add(buffer_id)
|
||||
file_op_tracker.start_operation(buffer_name, parsed_args, buffer_id)
|
||||
else:
|
||||
file_op_tracker.update_args(buffer_id, parsed_args)
|
||||
tool_call_buffers.pop(buffer_key, None)
|
||||
icon = tool_icons.get(buffer_name, "🔧")
|
||||
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
|
||||
if has_responded:
|
||||
console.print()
|
||||
|
||||
display_str = format_tool_display(buffer_name, parsed_args)
|
||||
console.print(
|
||||
f" {icon} {display_str}",
|
||||
style=f"dim {COLORS['tool']}",
|
||||
markup=False,
|
||||
)
|
||||
|
||||
# Restart spinner with context about which tool is executing
|
||||
status.update(f"[bold {COLORS['thinking']}]{display_str} 실행 중...")
|
||||
status.start()
|
||||
spinner_active = True
|
||||
|
||||
if getattr(message, "chunk_position", None) == "last":
|
||||
flush_text_buffer(final=True)
|
||||
|
||||
# After streaming loop - handle interrupt if it occurred
|
||||
flush_text_buffer(final=True)
|
||||
|
||||
# Handle human-in-the-loop after stream completes
|
||||
if interrupt_occurred:
|
||||
any_rejected = False
|
||||
|
||||
for interrupt_id, hitl_request in pending_interrupts.items():
|
||||
# Check if auto-approve is enabled
|
||||
if session_state.auto_approve:
|
||||
# Auto-approve all commands without prompting
|
||||
decisions = []
|
||||
for action_request in hitl_request["action_requests"]:
|
||||
# Show what's being auto-approved (brief, dim message)
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
spinner_active = False
|
||||
|
||||
description = action_request.get("description", "tool action")
|
||||
console.print()
|
||||
console.print(f" [dim]⚡ {description}[/dim]")
|
||||
|
||||
decisions.append({"type": "approve"})
|
||||
|
||||
hitl_response[interrupt_id] = {"decisions": decisions}
|
||||
|
||||
# Restart spinner for continuation
|
||||
if not spinner_active:
|
||||
status.start()
|
||||
spinner_active = True
|
||||
else:
|
||||
# Normal HITL flow - stop spinner and prompt user
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
spinner_active = False
|
||||
|
||||
# Handle human-in-the-loop approval
|
||||
decisions = []
|
||||
for action_index, action_request in enumerate(hitl_request["action_requests"]):
|
||||
decision = prompt_for_tool_approval(
|
||||
action_request,
|
||||
assistant_id,
|
||||
)
|
||||
|
||||
# Check if user wants to switch to auto-approve mode
|
||||
if isinstance(decision, dict) and decision.get("type") == "auto_approve_all":
|
||||
# Switch to auto-approve mode
|
||||
session_state.auto_approve = True
|
||||
console.print()
|
||||
console.print("[bold blue]✓ 자동 승인 모드 활성화됨[/bold blue]")
|
||||
console.print("[dim]향후 모든 도구 작업이 자동으로 승인됩니다.[/dim]")
|
||||
console.print()
|
||||
|
||||
# Approve this action and all remaining actions in the batch
|
||||
decisions.append({"type": "approve"})
|
||||
for _remaining_action in hitl_request["action_requests"][action_index + 1 :]:
|
||||
decisions.append({"type": "approve"})
|
||||
break
|
||||
decisions.append(decision)
|
||||
|
||||
# Mark file operations as HIL-approved if user approved
|
||||
if decision.get("type") == "approve":
|
||||
tool_name = action_request.get("name")
|
||||
if tool_name in {"write_file", "edit_file"}:
|
||||
file_op_tracker.mark_hitl_approved(tool_name, action_request.get("args", {}))
|
||||
|
||||
if any(decision.get("type") == "reject" for decision in decisions):
|
||||
any_rejected = True
|
||||
|
||||
hitl_response[interrupt_id] = {"decisions": decisions}
|
||||
|
||||
suppress_resumed_output = any_rejected
|
||||
|
||||
if interrupt_occurred and hitl_response:
|
||||
if suppress_resumed_output:
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
spinner_active = False
|
||||
|
||||
console.print("[yellow]명령이 거부되었습니다.[/yellow]", style="bold")
|
||||
console.print("에이전트에게 다르게 수행할 작업을 알려주세요.")
|
||||
console.print()
|
||||
return
|
||||
|
||||
# Resume the agent with the human decision
|
||||
stream_input = Command(resume=hitl_response)
|
||||
# Continue the while loop to restream
|
||||
else:
|
||||
# No interrupt, break out of while loop
|
||||
break
|
||||
|
||||
except asyncio.CancelledError:
|
||||
# Event loop cancelled the task (e.g. Ctrl+C during streaming) - clean up and return
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
console.print("\n[yellow]사용자에 의해 중단됨[/yellow]")
|
||||
console.print("에이전트 상태 업데이트 중...", style="dim")
|
||||
|
||||
try:
|
||||
await agent.aupdate_state(
|
||||
config=config,
|
||||
values={"messages": [HumanMessage(content="[이전 요청이 시스템에 의해 취소되었습니다]")]},
|
||||
)
|
||||
console.print("다음 명령 준비 완료.\n", style="dim")
|
||||
except Exception as e:
|
||||
console.print(f"[red]경고: 에이전트 상태 업데이트 실패: {e}[/red]\n")
|
||||
|
||||
return
|
||||
|
||||
except KeyboardInterrupt:
|
||||
# User pressed Ctrl+C - clean up and exit gracefully
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
console.print("\n[yellow]사용자에 의해 중단됨[/yellow]")
|
||||
console.print("에이전트 상태 업데이트 중...", style="dim")
|
||||
|
||||
# Inform the agent synchronously (in async context)
|
||||
try:
|
||||
await agent.aupdate_state(
|
||||
config=config,
|
||||
values={"messages": [HumanMessage(content="[사용자가 Ctrl+C로 이전 요청을 중단했습니다]")]},
|
||||
)
|
||||
console.print("다음 명령 준비 완료.\n", style="dim")
|
||||
except Exception as e:
|
||||
console.print(f"[red]경고: 에이전트 상태 업데이트 실패: {e}[/red]\n")
|
||||
|
||||
return
|
||||
|
||||
if spinner_active:
|
||||
status.stop()
|
||||
|
||||
if has_responded:
|
||||
console.print()
|
||||
# Track token usage (display only via /tokens command)
|
||||
if token_tracker and (captured_input_tokens or captured_output_tokens):
|
||||
token_tracker.add(captured_input_tokens, captured_output_tokens)
|
||||
@@ -1,4 +1,4 @@
|
||||
"""CLI 표시를 위한 파일 작업 추적 및 diff 계산 도움말."""
|
||||
"""Helpers for tracking file operations and computing diffs for CLI display."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
@@ -19,7 +19,7 @@ FileOpStatus = Literal["pending", "success", "error"]
|
||||
|
||||
@dataclass
|
||||
class ApprovalPreview:
|
||||
"""HITL 미리보기를 렌더링하는 데 사용되는 데이터."""
|
||||
"""Data used to render HITL previews."""
|
||||
|
||||
title: str
|
||||
details: list[str]
|
||||
@@ -29,7 +29,7 @@ class ApprovalPreview:
|
||||
|
||||
|
||||
def _safe_read(path: Path) -> str | None:
|
||||
"""파일 내용을 읽고, 실패 시 None을 반환합니다."""
|
||||
"""Read file content, returning None on failure."""
|
||||
try:
|
||||
return path.read_text()
|
||||
except (OSError, UnicodeDecodeError):
|
||||
@@ -37,7 +37,7 @@ def _safe_read(path: Path) -> str | None:
|
||||
|
||||
|
||||
def _count_lines(text: str) -> int:
|
||||
"""빈 문자열을 0줄로 취급하여 텍스트의 줄 수를 셉니다."""
|
||||
"""Count lines in text, treating empty strings as zero lines."""
|
||||
if not text:
|
||||
return 0
|
||||
return len(text.splitlines())
|
||||
@@ -51,17 +51,17 @@ def compute_unified_diff(
|
||||
max_lines: int | None = 800,
|
||||
context_lines: int = 3,
|
||||
) -> str | None:
|
||||
"""이전 내용과 이후 내용 간의 통합 diff를 계산합니다.
|
||||
"""Compute a unified diff between before and after content.
|
||||
|
||||
Args:
|
||||
before: 원본 내용
|
||||
after: 새로운 내용
|
||||
display_path: diff 헤더에 표시할 경로
|
||||
max_lines: 최대 diff 줄 수 (제한 없으면 None)
|
||||
context_lines: 변경 사항 주변의 컨텍스트 줄 수 (기본값 3)
|
||||
before: Original content
|
||||
after: New content
|
||||
display_path: Path for display in diff headers
|
||||
max_lines: Maximum number of diff lines (None for unlimited)
|
||||
context_lines: Number of context lines around changes (default 3)
|
||||
|
||||
Returns:
|
||||
통합 diff 문자열 또는 변경 사항이 없는 경우 None
|
||||
Unified diff string or None if no changes
|
||||
"""
|
||||
before_lines = before.splitlines()
|
||||
after_lines = after.splitlines()
|
||||
@@ -86,7 +86,7 @@ def compute_unified_diff(
|
||||
|
||||
@dataclass
|
||||
class FileOpMetrics:
|
||||
"""파일 작업에 대한 줄 및 바이트 수준 메트릭."""
|
||||
"""Line and byte level metrics for a file operation."""
|
||||
|
||||
lines_read: int = 0
|
||||
start_line: int | None = None
|
||||
@@ -99,7 +99,7 @@ class FileOpMetrics:
|
||||
|
||||
@dataclass
|
||||
class FileOperationRecord:
|
||||
"""단일 파일시스템 도구 호출을 추적합니다."""
|
||||
"""Track a single filesystem tool call."""
|
||||
|
||||
tool_name: str
|
||||
display_path: str
|
||||
@@ -117,7 +117,7 @@ class FileOperationRecord:
|
||||
|
||||
|
||||
def resolve_physical_path(path_str: str | None, assistant_id: str | None) -> Path | None:
|
||||
"""가상/상대 경로를 실제 파일시스템 경로로 변환합니다."""
|
||||
"""Convert a virtual/relative path to a physical filesystem path."""
|
||||
if not path_str:
|
||||
return None
|
||||
try:
|
||||
@@ -134,9 +134,9 @@ def resolve_physical_path(path_str: str | None, assistant_id: str | None) -> Pat
|
||||
|
||||
|
||||
def format_display_path(path_str: str | None) -> str:
|
||||
"""표시용으로 경로를 포맷합니다."""
|
||||
"""Format a path for display."""
|
||||
if not path_str:
|
||||
return "(알 수 없음)"
|
||||
return "(unknown)"
|
||||
try:
|
||||
path = Path(path_str)
|
||||
if path.is_absolute():
|
||||
@@ -151,7 +151,7 @@ def build_approval_preview(
|
||||
args: dict[str, Any],
|
||||
assistant_id: str | None,
|
||||
) -> ApprovalPreview | None:
|
||||
"""HITL 승인을 위한 요약 정보 및 diff를 수집합니다."""
|
||||
"""Collect summary info and diff for HITL approvals."""
|
||||
path_str = str(args.get("file_path") or args.get("path") or "")
|
||||
display_path = format_display_path(path_str)
|
||||
physical_path = resolve_physical_path(path_str, assistant_id)
|
||||
@@ -163,33 +163,37 @@ def build_approval_preview(
|
||||
diff = compute_unified_diff(before or "", after, display_path, max_lines=100)
|
||||
additions = 0
|
||||
if diff:
|
||||
additions = sum(1 for line in diff.splitlines() if line.startswith("+") and not line.startswith("+++"))
|
||||
additions = sum(
|
||||
1
|
||||
for line in diff.splitlines()
|
||||
if line.startswith("+") and not line.startswith("+++")
|
||||
)
|
||||
total_lines = _count_lines(after)
|
||||
details = [
|
||||
f"파일: {path_str}",
|
||||
"작업: 새 파일 생성" + (" (기존 내용 덮어씀)" if before else ""),
|
||||
f"작성할 줄 수: {additions or total_lines}",
|
||||
f"File: {path_str}",
|
||||
"Action: Create new file" + (" (overwrites existing content)" if before else ""),
|
||||
f"Lines to write: {additions or total_lines}",
|
||||
]
|
||||
return ApprovalPreview(
|
||||
title=f"{display_path} 쓰기",
|
||||
title=f"Write {display_path}",
|
||||
details=details,
|
||||
diff=diff,
|
||||
diff_title=f"{display_path} 차이(Diff)",
|
||||
diff_title=f"Diff {display_path}",
|
||||
)
|
||||
|
||||
if tool_name == "edit_file":
|
||||
if physical_path is None:
|
||||
return ApprovalPreview(
|
||||
title=f"{display_path} 업데이트",
|
||||
details=[f"파일: {path_str}", "작업: 텍스트 교체"],
|
||||
error="파일 경로를 확인할 수 없습니다.",
|
||||
title=f"Update {display_path}",
|
||||
details=[f"File: {path_str}", "Action: Replace text"],
|
||||
error="Unable to resolve file path.",
|
||||
)
|
||||
before = _safe_read(physical_path)
|
||||
if before is None:
|
||||
return ApprovalPreview(
|
||||
title=f"{display_path} 업데이트",
|
||||
details=[f"파일: {path_str}", "작업: 텍스트 교체"],
|
||||
error="현재 파일 내용을 읽을 수 없습니다.",
|
||||
title=f"Update {display_path}",
|
||||
details=[f"File: {path_str}", "Action: Replace text"],
|
||||
error="Unable to read current file contents.",
|
||||
)
|
||||
old_string = str(args.get("old_string", ""))
|
||||
new_string = str(args.get("new_string", ""))
|
||||
@@ -197,8 +201,8 @@ def build_approval_preview(
|
||||
replacement = perform_string_replacement(before, old_string, new_string, replace_all)
|
||||
if isinstance(replacement, str):
|
||||
return ApprovalPreview(
|
||||
title=f"{display_path} 업데이트",
|
||||
details=[f"파일: {path_str}", "작업: 텍스트 교체"],
|
||||
title=f"Update {display_path}",
|
||||
details=[f"File: {path_str}", "Action: Replace text"],
|
||||
error=replacement,
|
||||
)
|
||||
after, occurrences = replacement
|
||||
@@ -206,35 +210,45 @@ def build_approval_preview(
|
||||
additions = 0
|
||||
deletions = 0
|
||||
if diff:
|
||||
additions = sum(1 for line in diff.splitlines() if line.startswith("+") and not line.startswith("+++"))
|
||||
deletions = sum(1 for line in diff.splitlines() if line.startswith("-") and not line.startswith("---"))
|
||||
additions = sum(
|
||||
1
|
||||
for line in diff.splitlines()
|
||||
if line.startswith("+") and not line.startswith("+++")
|
||||
)
|
||||
deletions = sum(
|
||||
1
|
||||
for line in diff.splitlines()
|
||||
if line.startswith("-") and not line.startswith("---")
|
||||
)
|
||||
details = [
|
||||
f"파일: {path_str}",
|
||||
f"작업: 텍스트 교체 ({'모든 발생' if replace_all else '단일 발생'})",
|
||||
f"일치하는 발생: {occurrences}",
|
||||
f"변경된 줄: +{additions} / -{deletions}",
|
||||
f"File: {path_str}",
|
||||
f"Action: Replace text ({'all occurrences' if replace_all else 'single occurrence'})",
|
||||
f"Occurrences matched: {occurrences}",
|
||||
f"Lines changed: +{additions} / -{deletions}",
|
||||
]
|
||||
return ApprovalPreview(
|
||||
title=f"{display_path} 업데이트",
|
||||
title=f"Update {display_path}",
|
||||
details=details,
|
||||
diff=diff,
|
||||
diff_title=f"{display_path} 차이(Diff)",
|
||||
diff_title=f"Diff {display_path}",
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class FileOpTracker:
|
||||
"""CLI 상호작용 중 파일 작업 메트릭을 수집합니다."""
|
||||
"""Collect file operation metrics during a CLI interaction."""
|
||||
|
||||
def __init__(self, *, assistant_id: str | None, backend: BACKEND_TYPES | None = None) -> None:
|
||||
"""추적기를 초기화합니다."""
|
||||
"""Initialize the tracker."""
|
||||
self.assistant_id = assistant_id
|
||||
self.backend = backend
|
||||
self.active: dict[str | None, FileOperationRecord] = {}
|
||||
self.completed: list[FileOperationRecord] = []
|
||||
|
||||
def start_operation(self, tool_name: str, args: dict[str, Any], tool_call_id: str | None) -> None:
|
||||
def start_operation(
|
||||
self, tool_name: str, args: dict[str, Any], tool_call_id: str | None
|
||||
) -> None:
|
||||
if tool_name not in {"read_file", "write_file", "edit_file"}:
|
||||
return
|
||||
path_str = str(args.get("file_path") or args.get("path") or "")
|
||||
@@ -250,7 +264,11 @@ class FileOpTracker:
|
||||
if self.backend and path_str:
|
||||
try:
|
||||
responses = self.backend.download_files([path_str])
|
||||
if responses and responses[0].content is not None and responses[0].error is None:
|
||||
if (
|
||||
responses
|
||||
and responses[0].content is not None
|
||||
and responses[0].error is None
|
||||
):
|
||||
record.before_content = responses[0].content.decode("utf-8")
|
||||
else:
|
||||
record.before_content = ""
|
||||
@@ -261,7 +279,7 @@ class FileOpTracker:
|
||||
self.active[tool_call_id] = record
|
||||
|
||||
def update_args(self, tool_call_id: str, args: dict[str, Any]) -> None:
|
||||
"""활성 작업의 인수를 업데이트하고 before_content 캡처를 다시 시도합니다."""
|
||||
"""Update arguments for an active operation and retry capturing before_content."""
|
||||
record = self.active.get(tool_call_id)
|
||||
if not record:
|
||||
return
|
||||
@@ -277,7 +295,11 @@ class FileOpTracker:
|
||||
if self.backend:
|
||||
try:
|
||||
responses = self.backend.download_files([path_str])
|
||||
if responses and responses[0].content is not None and responses[0].error is None:
|
||||
if (
|
||||
responses
|
||||
and responses[0].content is not None
|
||||
and responses[0].error is None
|
||||
):
|
||||
record.before_content = responses[0].content.decode("utf-8")
|
||||
else:
|
||||
record.before_content = ""
|
||||
@@ -305,7 +327,9 @@ class FileOpTracker:
|
||||
else:
|
||||
content_text = str(content) if content is not None else ""
|
||||
|
||||
if getattr(tool_message, "status", "success") != "success" or content_text.lower().startswith("error"):
|
||||
if getattr(
|
||||
tool_message, "status", "success"
|
||||
) != "success" or content_text.lower().startswith("error"):
|
||||
record.status = "error"
|
||||
record.error = content_text
|
||||
self._finalize(record)
|
||||
@@ -335,7 +359,7 @@ class FileOpTracker:
|
||||
self._populate_after_content(record)
|
||||
if record.after_content is None:
|
||||
record.status = "error"
|
||||
record.error = "업데이트된 파일 내용을 읽을 수 없습니다."
|
||||
record.error = "Could not read updated file content."
|
||||
self._finalize(record)
|
||||
return record
|
||||
record.metrics.lines_written = _count_lines(record.after_content)
|
||||
@@ -348,8 +372,16 @@ class FileOpTracker:
|
||||
)
|
||||
record.diff = diff
|
||||
if diff:
|
||||
additions = sum(1 for line in diff.splitlines() if line.startswith("+") and not line.startswith("+++"))
|
||||
deletions = sum(1 for line in diff.splitlines() if line.startswith("-") and not line.startswith("---"))
|
||||
additions = sum(
|
||||
1
|
||||
for line in diff.splitlines()
|
||||
if line.startswith("+") and not line.startswith("+++")
|
||||
)
|
||||
deletions = sum(
|
||||
1
|
||||
for line in diff.splitlines()
|
||||
if line.startswith("-") and not line.startswith("---")
|
||||
)
|
||||
record.metrics.lines_added = additions
|
||||
record.metrics.lines_removed = deletions
|
||||
elif record.tool_name == "write_file" and (record.before_content or "") == "":
|
||||
@@ -369,7 +401,7 @@ class FileOpTracker:
|
||||
return record
|
||||
|
||||
def mark_hitl_approved(self, tool_name: str, args: dict[str, Any]) -> None:
|
||||
"""tool_name 및 file_path와 일치하는 작업을 HIL 승인됨으로 표시합니다."""
|
||||
"""Mark operations matching tool_name and file_path as HIL-approved."""
|
||||
file_path = args.get("file_path") or args.get("path")
|
||||
if not file_path:
|
||||
return
|
||||
@@ -388,7 +420,11 @@ class FileOpTracker:
|
||||
file_path = record.args.get("file_path") or record.args.get("path")
|
||||
if file_path:
|
||||
responses = self.backend.download_files([file_path])
|
||||
if responses and responses[0].content is not None and responses[0].error is None:
|
||||
if (
|
||||
responses
|
||||
and responses[0].content is not None
|
||||
and responses[0].error is None
|
||||
):
|
||||
record.after_content = responses[0].content.decode("utf-8")
|
||||
else:
|
||||
record.after_content = None
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""CLI를 위한 입력 처리, 완성 및 프롬프트 세션."""
|
||||
"""Input handling, completers, and prompt session for the CLI."""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
@@ -30,20 +30,20 @@ EXIT_CONFIRM_WINDOW = 3.0
|
||||
|
||||
|
||||
class ImageTracker:
|
||||
"""현재 대화에서 붙여넣은 이미지를 추적합니다."""
|
||||
"""Track pasted images in the current conversation."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.images: list[ImageData] = []
|
||||
self.next_id = 1
|
||||
|
||||
def add_image(self, image_data: ImageData) -> str:
|
||||
"""이미지를 추가하고 해당 자리 표시자 텍스트를 반환합니다.
|
||||
"""Add an image and return its placeholder text.
|
||||
|
||||
Args:
|
||||
image_data: 추적할 이미지 데이터
|
||||
image_data: The image data to track
|
||||
|
||||
Returns:
|
||||
"[image 1]"과 같은 자리 표시자 문자열
|
||||
Placeholder string like "[image 1]"
|
||||
"""
|
||||
placeholder = f"[image {self.next_id}]"
|
||||
image_data.placeholder = placeholder
|
||||
@@ -52,17 +52,17 @@ class ImageTracker:
|
||||
return placeholder
|
||||
|
||||
def get_images(self) -> list[ImageData]:
|
||||
"""추적된 모든 이미지를 가져옵니다."""
|
||||
"""Get all tracked images."""
|
||||
return self.images.copy()
|
||||
|
||||
def clear(self) -> None:
|
||||
"""추적된 모든 이미지를 지우고 카운터를 재설정합니다."""
|
||||
"""Clear all tracked images and reset counter."""
|
||||
self.images.clear()
|
||||
self.next_id = 1
|
||||
|
||||
|
||||
class FilePathCompleter(Completer):
|
||||
"""커서가 '@' 뒤에 있을 때만 파일시스템 완성을 활성화합니다."""
|
||||
"""Activate filesystem completion only when cursor is after '@'."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.path_completer = PathCompleter(
|
||||
@@ -72,7 +72,7 @@ class FilePathCompleter(Completer):
|
||||
)
|
||||
|
||||
def get_completions(self, document, complete_event):
|
||||
"""@가 감지되면 파일 경로 완성을 가져옵니다."""
|
||||
"""Get file path completions when @ is detected."""
|
||||
text = document.text_before_cursor
|
||||
|
||||
# Use regex to detect @path pattern at end of line
|
||||
@@ -110,10 +110,10 @@ class FilePathCompleter(Completer):
|
||||
|
||||
|
||||
class CommandCompleter(Completer):
|
||||
"""줄이 '/'로 시작할 때만 명령 완성을 활성화합니다."""
|
||||
"""Activate command completion only when line starts with '/'."""
|
||||
|
||||
def get_completions(self, document, _complete_event):
|
||||
"""/가 시작 부분에 있을 때 명령 완성을 가져옵니다."""
|
||||
"""Get command completions when / is at the start."""
|
||||
text = document.text_before_cursor
|
||||
|
||||
# Use regex to detect /command pattern at start of line
|
||||
@@ -135,7 +135,7 @@ class CommandCompleter(Completer):
|
||||
|
||||
|
||||
def parse_file_mentions(text: str) -> tuple[str, list[Path]]:
|
||||
"""@file 멘션을 추출하고 해결된 파일 경로가 포함된 정리된 텍스트를 반환합니다."""
|
||||
"""Extract @file mentions and return cleaned text with resolved file paths."""
|
||||
pattern = r"@((?:[^\s@]|(?<=\\)\s)+)" # Match @filename, allowing escaped spaces
|
||||
matches = re.findall(pattern, text)
|
||||
|
||||
@@ -154,21 +154,21 @@ def parse_file_mentions(text: str) -> tuple[str, list[Path]]:
|
||||
if path.exists() and path.is_file():
|
||||
files.append(path)
|
||||
else:
|
||||
console.print(f"[yellow]경고: 파일을 찾을 수 없습니다: {match}[/yellow]")
|
||||
console.print(f"[yellow]Warning: File not found: {match}[/yellow]")
|
||||
except Exception as e:
|
||||
console.print(f"[yellow]경고: 유효하지 않은 경로 {match}: {e}[/yellow]")
|
||||
console.print(f"[yellow]Warning: Invalid path {match}: {e}[/yellow]")
|
||||
|
||||
return text, files
|
||||
|
||||
|
||||
def parse_image_placeholders(text: str) -> tuple[str, int]:
|
||||
"""텍스트 내 이미지 자리 표시자 수를 셉니다.
|
||||
"""Count image placeholders in text.
|
||||
|
||||
Args:
|
||||
text: [image] 또는 [image N] 자리 표시자가 포함될 수 있는 입력 텍스트
|
||||
text: Input text potentially containing [image] or [image N] placeholders
|
||||
|
||||
Returns:
|
||||
이미지 자리 표시자 수가 포함된 (텍스트, 개수) 튜플
|
||||
Tuple of (text, count) where count is the number of image placeholders found
|
||||
"""
|
||||
# Match [image] or [image N] patterns
|
||||
pattern = r"\[image(?:\s+\d+)?\]"
|
||||
@@ -176,8 +176,10 @@ def parse_image_placeholders(text: str) -> tuple[str, int]:
|
||||
return text, len(matches)
|
||||
|
||||
|
||||
def get_bottom_toolbar(session_state: SessionState, session_ref: dict) -> Callable[[], list[tuple[str, str]]]:
|
||||
"""자동 승인 상태와 BASH 모드를 표시하는 툴바 함수를 반환합니다."""
|
||||
def get_bottom_toolbar(
|
||||
session_state: SessionState, session_ref: dict
|
||||
) -> Callable[[], list[tuple[str, str]]]:
|
||||
"""Return toolbar function that shows auto-approve status and BASH MODE."""
|
||||
|
||||
def toolbar() -> list[tuple[str, str]]:
|
||||
parts = []
|
||||
@@ -196,10 +198,10 @@ def get_bottom_toolbar(session_state: SessionState, session_ref: dict) -> Callab
|
||||
|
||||
# Base status message
|
||||
if session_state.auto_approve:
|
||||
base_msg = "자동 승인 켜짐 (CTRL+T로 전환)"
|
||||
base_msg = "auto-accept ON (CTRL+T to toggle)"
|
||||
base_class = "class:toolbar-green"
|
||||
else:
|
||||
base_msg = "수동 승인 (CTRL+T로 전환)"
|
||||
base_msg = "manual accept (CTRL+T to toggle)"
|
||||
base_class = "class:toolbar-orange"
|
||||
|
||||
parts.append((base_class, base_msg))
|
||||
@@ -210,7 +212,7 @@ def get_bottom_toolbar(session_state: SessionState, session_ref: dict) -> Callab
|
||||
now = time.monotonic()
|
||||
if now < hint_until:
|
||||
parts.append(("", " | "))
|
||||
parts.append(("class:toolbar-exit", " 종료하려면 Ctrl+C를 한번 더 누르세요 "))
|
||||
parts.append(("class:toolbar-exit", " Ctrl+C again to exit "))
|
||||
else:
|
||||
session_state.exit_hint_until = None
|
||||
|
||||
@@ -222,7 +224,7 @@ def get_bottom_toolbar(session_state: SessionState, session_ref: dict) -> Callab
|
||||
def create_prompt_session(
|
||||
_assistant_id: str, session_state: SessionState, image_tracker: ImageTracker | None = None
|
||||
) -> PromptSession:
|
||||
"""모든 기능이 구성된 PromptSession을 생성합니다."""
|
||||
"""Create a configured PromptSession with all features."""
|
||||
# Set default editor if not already set
|
||||
if "EDITOR" not in os.environ:
|
||||
os.environ["EDITOR"] = "nano"
|
||||
@@ -232,7 +234,7 @@ def create_prompt_session(
|
||||
|
||||
@kb.add("c-c")
|
||||
def _(event) -> None:
|
||||
"""종료하려면 짧은 시간 내에 Ctrl+C를 두 번 눌러야 합니다."""
|
||||
"""Require double Ctrl+C within a short window to exit."""
|
||||
app = event.app
|
||||
now = time.monotonic()
|
||||
|
||||
@@ -256,7 +258,10 @@ def create_prompt_session(
|
||||
app_ref = app
|
||||
|
||||
def clear_hint() -> None:
|
||||
if session_state.exit_hint_until is not None and time.monotonic() >= session_state.exit_hint_until:
|
||||
if (
|
||||
session_state.exit_hint_until is not None
|
||||
and time.monotonic() >= session_state.exit_hint_until
|
||||
):
|
||||
session_state.exit_hint_until = None
|
||||
session_state.exit_hint_handle = None
|
||||
app_ref.invalidate()
|
||||
@@ -268,7 +273,7 @@ def create_prompt_session(
|
||||
# Bind Ctrl+T to toggle auto-approve
|
||||
@kb.add("c-t")
|
||||
def _(event) -> None:
|
||||
"""자동 승인 모드를 토글합니다."""
|
||||
"""Toggle auto-approve mode."""
|
||||
session_state.toggle_auto_approve()
|
||||
# Force UI refresh to update toolbar
|
||||
event.app.invalidate()
|
||||
@@ -278,7 +283,7 @@ def create_prompt_session(
|
||||
from prompt_toolkit.keys import Keys
|
||||
|
||||
def _handle_paste_with_image_check(event, pasted_text: str = "") -> None:
|
||||
"""클립보드에서 이미지를 확인하고, 그렇지 않으면 붙여넣은 텍스트를 삽입합니다."""
|
||||
"""Check clipboard for image, otherwise insert pasted text."""
|
||||
# Try to get an image from clipboard
|
||||
clipboard_image = get_clipboard_image()
|
||||
|
||||
@@ -298,20 +303,20 @@ def create_prompt_session(
|
||||
|
||||
@kb.add(Keys.BracketedPaste)
|
||||
def _(event) -> None:
|
||||
"""브래킷 붙여넣기(macOS의 Cmd+V)를 처리합니다 - 이미지를 먼저 확인합니다."""
|
||||
"""Handle bracketed paste (Cmd+V on macOS) - check for images first."""
|
||||
# Bracketed paste provides the pasted text in event.data
|
||||
pasted_text = event.data if hasattr(event, "data") else ""
|
||||
_handle_paste_with_image_check(event, pasted_text)
|
||||
|
||||
@kb.add("c-v")
|
||||
def _(event) -> None:
|
||||
"""Ctrl+V 붙여넣기를 처리합니다 - 이미지를 먼저 확인합니다."""
|
||||
"""Handle Ctrl+V paste - check for images first."""
|
||||
_handle_paste_with_image_check(event)
|
||||
|
||||
# Bind regular Enter to submit (intuitive behavior)
|
||||
@kb.add("enter")
|
||||
def _(event) -> None:
|
||||
"""완성 메뉴가 활성화되지 않은 경우 Enter는 입력을 제출합니다."""
|
||||
"""Enter submits the input, unless completion menu is active."""
|
||||
buffer = event.current_buffer
|
||||
|
||||
# If completion menu is showing, apply the current completion
|
||||
@@ -340,19 +345,19 @@ def create_prompt_session(
|
||||
# Alt+Enter for newlines (press ESC then Enter, or Option+Enter on Mac)
|
||||
@kb.add("escape", "enter")
|
||||
def _(event) -> None:
|
||||
"""Alt+Enter는 여러 줄 입력을 위해 줄바꿈을 삽입합니다."""
|
||||
"""Alt+Enter inserts a newline for multi-line input."""
|
||||
event.current_buffer.insert_text("\n")
|
||||
|
||||
# Ctrl+E to open in external editor
|
||||
@kb.add("c-e")
|
||||
def _(event) -> None:
|
||||
"""현재 입력을 외부 편집기(기본값 nano)에서 엽니다."""
|
||||
"""Open the current input in an external editor (nano by default)."""
|
||||
event.current_buffer.open_in_editor()
|
||||
|
||||
# Backspace handler to retrigger completions and delete image tags as units
|
||||
@kb.add("backspace")
|
||||
def _(event) -> None:
|
||||
"""백스페이스 처리: 이미지 태그를 단일 단위로 삭제하고 완성을 다시 트리거합니다."""
|
||||
"""Handle backspace: delete image tags as single unit, retrigger completion."""
|
||||
buffer = event.current_buffer
|
||||
text_before = buffer.document.text_before_cursor
|
||||
|
||||
@@ -388,12 +393,14 @@ def create_prompt_session(
|
||||
from prompt_toolkit.styles import Style
|
||||
|
||||
# Define styles for the toolbar with full-width background colors
|
||||
toolbar_style = Style.from_dict({
|
||||
"bottom-toolbar": "noreverse", # Disable default reverse video
|
||||
"toolbar-green": "bg:#10b981 #000000", # Green for auto-accept ON
|
||||
"toolbar-orange": "bg:#f59e0b #000000", # Orange for manual accept
|
||||
"toolbar-exit": "bg:#2563eb #ffffff", # Blue for exit hint
|
||||
})
|
||||
toolbar_style = Style.from_dict(
|
||||
{
|
||||
"bottom-toolbar": "noreverse", # Disable default reverse video
|
||||
"toolbar-green": "bg:#10b981 #000000", # Green for auto-accept ON
|
||||
"toolbar-orange": "bg:#f59e0b #000000", # Orange for manual accept
|
||||
"toolbar-exit": "bg:#2563eb #ffffff", # Blue for exit hint
|
||||
}
|
||||
)
|
||||
|
||||
# Create session reference dict for toolbar to access session
|
||||
session_ref = {}
|
||||
@@ -409,7 +416,9 @@ def create_prompt_session(
|
||||
complete_in_thread=True, # Async completion prevents menu freezing
|
||||
mouse_support=False,
|
||||
enable_open_in_editor=True, # Allow Ctrl+X Ctrl+E to open external editor
|
||||
bottom_toolbar=get_bottom_toolbar(session_state, session_ref), # Persistent status bar at bottom
|
||||
bottom_toolbar=get_bottom_toolbar(
|
||||
session_state, session_ref
|
||||
), # Persistent status bar at bottom
|
||||
style=toolbar_style, # Apply toolbar styling
|
||||
reserve_space_for_menu=7, # Reserve space for completion menu to show 5-6 results
|
||||
)
|
||||
|
||||
@@ -1 +1 @@
|
||||
"""DeepAgents CLI를 위한 샌드박스 연동."""
|
||||
"""Sandbox integrations for DeepAgents CLI."""
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Daytona 샌드박스 백엔드 구현."""
|
||||
"""Daytona sandbox backend implementation."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
@@ -16,100 +16,102 @@ if TYPE_CHECKING:
|
||||
|
||||
|
||||
class DaytonaBackend(BaseSandbox):
|
||||
"""SandboxBackendProtocol을 준수하는 Daytona 백엔드 구현.
|
||||
"""Daytona backend implementation conforming to SandboxBackendProtocol.
|
||||
|
||||
이 구현은 BaseSandbox로부터 모든 파일 작업 메서드를 상속받으며,
|
||||
Daytona의 API를 사용하여 execute() 메서드만 구현합니다.
|
||||
This implementation inherits all file operation methods from BaseSandbox
|
||||
and only implements the execute() method using Daytona's API.
|
||||
"""
|
||||
|
||||
def __init__(self, sandbox: Sandbox) -> None:
|
||||
"""Daytona 샌드박스 클라이언트로 DaytonaBackend를 초기화합니다.
|
||||
"""Initialize the DaytonaBackend with a Daytona sandbox client.
|
||||
|
||||
Args:
|
||||
sandbox: Daytona 샌드박스 인스턴스
|
||||
sandbox: Daytona sandbox instance
|
||||
"""
|
||||
self._sandbox = sandbox
|
||||
self._timeout: int = 30 * 60 # 30분
|
||||
self._timeout: int = 30 * 60 # 30 mins
|
||||
|
||||
@property
|
||||
def id(self) -> str:
|
||||
"""샌드박스 백엔드의 고유 식별자."""
|
||||
"""Unique identifier for the sandbox backend."""
|
||||
return self._sandbox.id
|
||||
|
||||
def execute(
|
||||
self,
|
||||
command: str,
|
||||
) -> ExecuteResponse:
|
||||
"""샌드박스에서 명령을 실행하고 ExecuteResponse를 반환합니다.
|
||||
"""Execute a command in the sandbox and return ExecuteResponse.
|
||||
|
||||
Args:
|
||||
command: 실행할 전체 셸 명령 문자열.
|
||||
command: Full shell command string to execute.
|
||||
|
||||
Returns:
|
||||
결합된 출력, 종료 코드, 선택적 시그널 및 잘림 플래그가 포함된 ExecuteResponse.
|
||||
ExecuteResponse with combined output, exit code, optional signal, and truncation flag.
|
||||
"""
|
||||
result = self._sandbox.process.exec(command, timeout=self._timeout)
|
||||
|
||||
return ExecuteResponse(
|
||||
output=result.result, # Daytona는 stdout/stderr를 결합함
|
||||
output=result.result, # Daytona combines stdout/stderr
|
||||
exit_code=result.exit_code,
|
||||
truncated=False,
|
||||
)
|
||||
|
||||
def download_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
"""Daytona 샌드박스에서 여러 파일을 다운로드합니다.
|
||||
"""Download multiple files from the Daytona sandbox.
|
||||
|
||||
효율성을 위해 Daytona의 네이티브 일괄 다운로드 API를 활용합니다.
|
||||
부분적인 성공을 지원하므로 개별 다운로드가 다른 다운로드에 영향을 주지 않고 실패할 수 있습니다.
|
||||
Leverages Daytona's native batch download API for efficiency.
|
||||
Supports partial success - individual downloads may fail without
|
||||
affecting others.
|
||||
|
||||
Args:
|
||||
paths: 다운로드할 파일 경로 목록.
|
||||
paths: List of file paths to download.
|
||||
|
||||
Returns:
|
||||
입력 경로당 하나씩 FileDownloadResponse 객체 목록.
|
||||
응답 순서는 입력 순서와 일치합니다.
|
||||
List of FileDownloadResponse objects, one per input path.
|
||||
Response order matches input order.
|
||||
|
||||
TODO: Daytona API 오류 문자열을 표준화된 FileOperationError 코드로 매핑해야 합니다.
|
||||
현재는 정상적인 동작(happy path)만 구현되어 있습니다.
|
||||
TODO: Map Daytona API error strings to standardized FileOperationError codes.
|
||||
Currently only implements happy path.
|
||||
"""
|
||||
from daytona import FileDownloadRequest
|
||||
|
||||
# Daytona의 네이티브 일괄 API를 사용하여 일괄 다운로드 요청 생성
|
||||
# Create batch download request using Daytona's native batch API
|
||||
download_requests = [FileDownloadRequest(source=path) for path in paths]
|
||||
daytona_responses = self._sandbox.fs.download_files(download_requests)
|
||||
|
||||
# Daytona 결과를 당사의 응답 형식으로 변환
|
||||
# TODO: 사용 가능한 경우 resp.error를 표준화된 오류 코드로 매핑
|
||||
# Convert Daytona results to our response format
|
||||
# TODO: Map resp.error to standardized error codes when available
|
||||
return [
|
||||
FileDownloadResponse(
|
||||
path=resp.source,
|
||||
content=resp.result,
|
||||
error=None, # TODO: resp.error를 FileOperationError로 매핑
|
||||
error=None, # TODO: map resp.error to FileOperationError
|
||||
)
|
||||
for resp in daytona_responses
|
||||
]
|
||||
|
||||
def upload_files(self, files: list[tuple[str, bytes]]) -> list[FileUploadResponse]:
|
||||
"""Daytona 샌드박스에 여러 파일을 업로드합니다.
|
||||
"""Upload multiple files to the Daytona sandbox.
|
||||
|
||||
효율성을 위해 Daytona의 네이티브 일괄 업로드 API를 활용합니다.
|
||||
부분적인 성공을 지원하므로 개별 업로드가 다른 업로드에 영향을 주지 않고 실패할 수 있습니다.
|
||||
Leverages Daytona's native batch upload API for efficiency.
|
||||
Supports partial success - individual uploads may fail without
|
||||
affecting others.
|
||||
|
||||
Args:
|
||||
files: 업로드할 (경로, 내용) 튜플 목록.
|
||||
files: List of (path, content) tuples to upload.
|
||||
|
||||
Returns:
|
||||
입력 파일당 하나씩 FileUploadResponse 객체 목록.
|
||||
응답 순서는 입력 순서와 일치합니다.
|
||||
List of FileUploadResponse objects, one per input file.
|
||||
Response order matches input order.
|
||||
|
||||
TODO: Daytona API 오류 문자열을 표준화된 FileOperationError 코드로 매핑해야 합니다.
|
||||
현재는 정상적인 동작(happy path)만 구현되어 있습니다.
|
||||
TODO: Map Daytona API error strings to standardized FileOperationError codes.
|
||||
Currently only implements happy path.
|
||||
"""
|
||||
from daytona import FileUpload
|
||||
|
||||
# Daytona의 네이티브 일괄 API를 사용하여 일괄 업로드 요청 생성
|
||||
# Create batch upload request using Daytona's native batch API
|
||||
upload_requests = [FileUpload(source=content, destination=path) for path, content in files]
|
||||
self._sandbox.fs.upload_files(upload_requests)
|
||||
|
||||
# TODO: Daytona가 오류 정보를 반환하는지 확인하고 FileOperationError 코드로 매핑
|
||||
# TODO: Check if Daytona returns error info and map to FileOperationError codes
|
||||
return [FileUploadResponse(path=path, error=None) for path, _ in files]
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Modal 샌드박스 백엔드 구현."""
|
||||
"""Modal sandbox backend implementation."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
@@ -16,49 +16,49 @@ if TYPE_CHECKING:
|
||||
|
||||
|
||||
class ModalBackend(BaseSandbox):
|
||||
"""SandboxBackendProtocol을 준수하는 Modal 백엔드 구현.
|
||||
"""Modal backend implementation conforming to SandboxBackendProtocol.
|
||||
|
||||
이 구현은 BaseSandbox로부터 모든 파일 작업 메서드를 상속받으며,
|
||||
Modal의 API를 사용하여 execute() 메서드만 구현합니다.
|
||||
This implementation inherits all file operation methods from BaseSandbox
|
||||
and only implements the execute() method using Modal's API.
|
||||
"""
|
||||
|
||||
def __init__(self, sandbox: modal.Sandbox) -> None:
|
||||
"""Modal 샌드박스 인스턴스로 ModalBackend를 초기화합니다.
|
||||
"""Initialize the ModalBackend with a Modal sandbox instance.
|
||||
|
||||
Args:
|
||||
sandbox: 활성 Modal 샌드박스 인스턴스
|
||||
sandbox: Active Modal Sandbox instance
|
||||
"""
|
||||
self._sandbox = sandbox
|
||||
self._timeout = 30 * 60
|
||||
|
||||
@property
|
||||
def id(self) -> str:
|
||||
"""샌드박스 백엔드의 고유 식별자."""
|
||||
"""Unique identifier for the sandbox backend."""
|
||||
return self._sandbox.object_id
|
||||
|
||||
def execute(
|
||||
self,
|
||||
command: str,
|
||||
) -> ExecuteResponse:
|
||||
"""샌드박스에서 명령을 실행하고 ExecuteResponse를 반환합니다.
|
||||
"""Execute a command in the sandbox and return ExecuteResponse.
|
||||
|
||||
Args:
|
||||
command: 실행할 전체 셸 명령 문자열.
|
||||
command: Full shell command string to execute.
|
||||
|
||||
Returns:
|
||||
결합된 출력, 종료 코드 및 잘림 플래그가 포함된 ExecuteResponse.
|
||||
ExecuteResponse with combined output, exit code, and truncation flag.
|
||||
"""
|
||||
# Modal의 exec API를 사용하여 명령 실행
|
||||
# Execute command using Modal's exec API
|
||||
process = self._sandbox.exec("bash", "-c", command, timeout=self._timeout)
|
||||
|
||||
# 프로세스가 완료될 때까지 대기
|
||||
# Wait for process to complete
|
||||
process.wait()
|
||||
|
||||
# stdout 및 stderr 읽기
|
||||
# Read stdout and stderr
|
||||
stdout = process.stdout.read()
|
||||
stderr = process.stderr.read()
|
||||
|
||||
# stdout과 stderr 결합 (Runloop의 방식과 일치)
|
||||
# Combine stdout and stderr (matching Runloop's approach)
|
||||
output = stdout or ""
|
||||
if stderr:
|
||||
output += "\n" + stderr if output else stderr
|
||||
@@ -66,29 +66,30 @@ class ModalBackend(BaseSandbox):
|
||||
return ExecuteResponse(
|
||||
output=output,
|
||||
exit_code=process.returncode,
|
||||
truncated=False, # Modal은 잘림 정보를 제공하지 않음
|
||||
truncated=False, # Modal doesn't provide truncation info
|
||||
)
|
||||
|
||||
def download_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
"""Modal 샌드박스에서 여러 파일을 다운로드합니다.
|
||||
"""Download multiple files from the Modal sandbox.
|
||||
|
||||
부분적인 성공을 지원하므로 개별 다운로드가 다른 다운로드에 영향을 주지 않고 실패할 수 있습니다.
|
||||
Supports partial success - individual downloads may fail without
|
||||
affecting others.
|
||||
|
||||
Args:
|
||||
paths: 다운로드할 파일 경로 목록.
|
||||
paths: List of file paths to download.
|
||||
|
||||
Returns:
|
||||
입력 경로당 하나씩 FileDownloadResponse 객체 목록.
|
||||
응답 순서는 입력 순서와 일치합니다.
|
||||
List of FileDownloadResponse objects, one per input path.
|
||||
Response order matches input order.
|
||||
|
||||
TODO: 표준화된 FileOperationError 코드를 사용하여 적절한 오류 처리를 구현해야 합니다.
|
||||
Modal의 sandbox.open()이 실제로 어떤 예외를 발생시키는지 확인이 필요합니다.
|
||||
현재는 정상적인 동작(happy path)만 구현되어 있습니다.
|
||||
TODO: Implement proper error handling with standardized FileOperationError codes.
|
||||
Need to determine what exceptions Modal's sandbox.open() actually raises.
|
||||
Currently only implements happy path.
|
||||
"""
|
||||
# 이 구현은 Modal 샌드박스 파일 API에 의존합니다.
|
||||
# This implementation relies on the Modal sandbox file API.
|
||||
# https://modal.com/doc/guide/sandbox-files
|
||||
# 이 API는 현재 알파 단계이며 프로덕션 용도로는 권장되지 않습니다.
|
||||
# CLI 애플리케이션을 대상으로 하므로 여기에서 사용하는 것은 괜찮습니다.
|
||||
# The API is currently in alpha and is not recommended for production use.
|
||||
# We're OK using it here as it's targeting the CLI application.
|
||||
responses = []
|
||||
for path in paths:
|
||||
with self._sandbox.open(path, "rb") as f:
|
||||
@@ -97,25 +98,26 @@ class ModalBackend(BaseSandbox):
|
||||
return responses
|
||||
|
||||
def upload_files(self, files: list[tuple[str, bytes]]) -> list[FileUploadResponse]:
|
||||
"""Modal 샌드박스에 여러 파일을 업로드합니다.
|
||||
"""Upload multiple files to the Modal sandbox.
|
||||
|
||||
부분적인 성공을 지원하므로 개별 업로드가 다른 업로드에 영향을 주지 않고 실패할 수 있습니다.
|
||||
Supports partial success - individual uploads may fail without
|
||||
affecting others.
|
||||
|
||||
Args:
|
||||
files: 업로드할 (경로, 내용) 튜플 목록.
|
||||
files: List of (path, content) tuples to upload.
|
||||
|
||||
Returns:
|
||||
입력 파일당 하나씩 FileUploadResponse 객체 목록.
|
||||
응답 순서는 입력 순서와 일치합니다.
|
||||
List of FileUploadResponse objects, one per input file.
|
||||
Response order matches input order.
|
||||
|
||||
TODO: 표준화된 FileOperationError 코드를 사용하여 적절한 오류 처리를 구현해야 합니다.
|
||||
Modal의 sandbox.open()이 실제로 어떤 예외를 발생시키는지 확인이 필요합니다.
|
||||
현재는 정상적인 동작(happy path)만 구현되어 있습니다.
|
||||
TODO: Implement proper error handling with standardized FileOperationError codes.
|
||||
Need to determine what exceptions Modal's sandbox.open() actually raises.
|
||||
Currently only implements happy path.
|
||||
"""
|
||||
# 이 구현은 Modal 샌드박스 파일 API에 의존합니다.
|
||||
# This implementation relies on the Modal sandbox file API.
|
||||
# https://modal.com/doc/guide/sandbox-files
|
||||
# 이 API는 현재 알파 단계이며 프로덕션 용도로는 권장되지 않습니다.
|
||||
# CLI 애플리케이션을 대상으로 하므로 여기에서 사용하는 것은 괜찮습니다.
|
||||
# The API is currently in alpha and is not recommended for production use.
|
||||
# We're OK using it here as it's targeting the CLI application.
|
||||
responses = []
|
||||
for path, content in files:
|
||||
with self._sandbox.open(path, "wb") as f:
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
"""Runloop을 위한 BackendProtocol 구현."""
|
||||
"""BackendProtocol implementation for Runloop."""
|
||||
|
||||
try:
|
||||
import runloop_api_client
|
||||
except ImportError:
|
||||
msg = (
|
||||
"RunloopBackend를 위해서는 runloop_api_client 패키지가 필요합니다. "
|
||||
"`pip install runloop_api_client`로 설치하십시오."
|
||||
"runloop_api_client package is required for RunloopBackend. "
|
||||
"Install with `pip install runloop_api_client`."
|
||||
)
|
||||
raise ImportError(msg)
|
||||
|
||||
@@ -17,10 +17,10 @@ from runloop_api_client import Runloop
|
||||
|
||||
|
||||
class RunloopBackend(BaseSandbox):
|
||||
"""Runloop devbox의 파일에서 작동하는 백엔드.
|
||||
"""Backend that operates on files in a Runloop devbox.
|
||||
|
||||
이 구현은 Runloop API 클라이언트를 사용하여 명령을 실행하고
|
||||
원격 devbox 환경 내에서 파일을 조작합니다.
|
||||
This implementation uses the Runloop API client to execute commands
|
||||
and manipulate files within a remote devbox environment.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
@@ -29,22 +29,22 @@ class RunloopBackend(BaseSandbox):
|
||||
client: Runloop | None = None,
|
||||
api_key: str | None = None,
|
||||
) -> None:
|
||||
"""Runloop 프로토콜을 초기화합니다.
|
||||
"""Initialize Runloop protocol.
|
||||
|
||||
Args:
|
||||
devbox_id: 작업할 Runloop devbox의 ID.
|
||||
client: 선택적인 기존 Runloop 클라이언트 인스턴스
|
||||
api_key: 새 클라이언트를 생성하기 위한 선택적 API 키
|
||||
(기본값은 RUNLOOP_API_KEY 환경 변수)
|
||||
devbox_id: ID of the Runloop devbox to operate on.
|
||||
client: Optional existing Runloop client instance
|
||||
api_key: Optional API key for creating a new client
|
||||
(defaults to RUNLOOP_API_KEY environment variable)
|
||||
"""
|
||||
if client and api_key:
|
||||
msg = "client 또는 bearer_token 중 하나만 제공해야 하며, 둘 다 제공할 수는 없습니다."
|
||||
msg = "Provide either client or bearer_token, not both."
|
||||
raise ValueError(msg)
|
||||
|
||||
if client is None:
|
||||
api_key = api_key or os.environ.get("RUNLOOP_API_KEY", None)
|
||||
if api_key is None:
|
||||
msg = "client 또는 bearer_token 중 하나는 제공되어야 합니다."
|
||||
msg = "Either client or bearer_token must be provided."
|
||||
raise ValueError(msg)
|
||||
client = Runloop(bearer_token=api_key)
|
||||
|
||||
@@ -54,27 +54,28 @@ class RunloopBackend(BaseSandbox):
|
||||
|
||||
@property
|
||||
def id(self) -> str:
|
||||
"""샌드박스 백엔드의 고유 식별자."""
|
||||
"""Unique identifier for the sandbox backend."""
|
||||
return self._devbox_id
|
||||
|
||||
def execute(
|
||||
self,
|
||||
command: str,
|
||||
) -> ExecuteResponse:
|
||||
"""devbox에서 명령을 실행하고 ExecuteResponse를 반환합니다.
|
||||
"""Execute a command in the devbox and return ExecuteResponse.
|
||||
|
||||
Args:
|
||||
command: 실행할 전체 셸 명령 문자열.
|
||||
command: Full shell command string to execute.
|
||||
timeout: Maximum execution time in seconds (default: 30 minutes).
|
||||
|
||||
Returns:
|
||||
결합된 출력, 종료 코드, 선택적 시그널 및 잘림 플래그가 포함된 ExecuteResponse.
|
||||
ExecuteResponse with combined output, exit code, optional signal, and truncation flag.
|
||||
"""
|
||||
result = self._client.devboxes.execute_and_await_completion(
|
||||
devbox_id=self._devbox_id,
|
||||
command=command,
|
||||
timeout=self._timeout,
|
||||
)
|
||||
# stdout과 stderr 결합
|
||||
# Combine stdout and stderr
|
||||
output = result.stdout or ""
|
||||
if result.stderr:
|
||||
output += "\n" + result.stderr if output else result.stderr
|
||||
@@ -82,21 +83,22 @@ class RunloopBackend(BaseSandbox):
|
||||
return ExecuteResponse(
|
||||
output=output,
|
||||
exit_code=result.exit_status,
|
||||
truncated=False, # Runloop는 잘림 정보를 제공하지 않음
|
||||
truncated=False, # Runloop doesn't provide truncation info
|
||||
)
|
||||
|
||||
def download_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
"""Runloop devbox에서 여러 파일을 다운로드합니다.
|
||||
"""Download multiple files from the Runloop devbox.
|
||||
|
||||
Runloop API를 사용하여 파일을 개별적으로 다운로드합니다. 순서를 유지하고
|
||||
예외를 발생시키는 대신 파일별 오류를 보고하는 FileDownloadResponse 객체 목록을 반환합니다.
|
||||
Downloads files individually using the Runloop API. Returns a list of
|
||||
FileDownloadResponse objects preserving order and reporting per-file
|
||||
errors rather than raising exceptions.
|
||||
|
||||
TODO: 표준화된 FileOperationError 코드를 사용하여 적절한 오류 처리를 구현해야 합니다.
|
||||
현재는 정상적인 동작(happy path)만 구현되어 있습니다.
|
||||
TODO: Implement proper error handling with standardized FileOperationError codes.
|
||||
Currently only implements happy path.
|
||||
"""
|
||||
responses: list[FileDownloadResponse] = []
|
||||
for path in paths:
|
||||
# devboxes.download_file은 .read()를 노출하는 BinaryAPIResponse를 반환함
|
||||
# devboxes.download_file returns a BinaryAPIResponse which exposes .read()
|
||||
resp = self._client.devboxes.download_file(self._devbox_id, path=path)
|
||||
content = resp.read()
|
||||
responses.append(FileDownloadResponse(path=path, content=content, error=None))
|
||||
@@ -104,17 +106,18 @@ class RunloopBackend(BaseSandbox):
|
||||
return responses
|
||||
|
||||
def upload_files(self, files: list[tuple[str, bytes]]) -> list[FileUploadResponse]:
|
||||
"""Runloop devbox에 여러 파일을 업로드합니다.
|
||||
"""Upload multiple files to the Runloop devbox.
|
||||
|
||||
Runloop API를 사용하여 파일을 개별적으로 업로드합니다. 순서를 유지하고
|
||||
예외를 발생시키는 대신 파일별 오류를 보고하는 FileUploadResponse 객체 목록을 반환합니다.
|
||||
Uploads files individually using the Runloop API. Returns a list of
|
||||
FileUploadResponse objects preserving order and reporting per-file
|
||||
errors rather than raising exceptions.
|
||||
|
||||
TODO: 표준화된 FileOperationError 코드를 사용하여 적절한 오류 처리를 구현해야 합니다.
|
||||
현재는 정상적인 동작(happy path)만 구현되어 있습니다.
|
||||
TODO: Implement proper error handling with standardized FileOperationError codes.
|
||||
Currently only implements happy path.
|
||||
"""
|
||||
responses: list[FileUploadResponse] = []
|
||||
for path, content in files:
|
||||
# Runloop 클라이언트는 'file'을 바이트 또는 파일류 객체로 기대함
|
||||
# The Runloop client expects 'file' as bytes or a file-like object
|
||||
self._client.devboxes.upload_file(self._devbox_id, path=path, file=content)
|
||||
responses.append(FileUploadResponse(path=path, error=None))
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""컨텍스트 매니저를 통한 샌드박스 수명 주기 관리."""
|
||||
"""Sandbox lifecycle management with context managers."""
|
||||
|
||||
import os
|
||||
import shlex
|
||||
@@ -14,64 +14,64 @@ from deepagents_cli.config import console
|
||||
|
||||
|
||||
def _run_sandbox_setup(backend: SandboxBackendProtocol, setup_script_path: str) -> None:
|
||||
"""환경 변수 확장을 포함하여 샌드박스에서 사용자 설정 스크립트를 실행합니다.
|
||||
"""Run users setup script in sandbox with env var expansion.
|
||||
|
||||
Args:
|
||||
backend: 샌드박스 백엔드 인스턴스
|
||||
setup_script_path: 설정 스크립트 파일 경로
|
||||
backend: Sandbox backend instance
|
||||
setup_script_path: Path to setup script file
|
||||
"""
|
||||
script_path = Path(setup_script_path)
|
||||
if not script_path.exists():
|
||||
msg = f"설정 스크립트를 찾을 수 없습니다: {setup_script_path}"
|
||||
msg = f"Setup script not found: {setup_script_path}"
|
||||
raise FileNotFoundError(msg)
|
||||
|
||||
console.print(f"[dim]설정 스크립트 실행 중: {setup_script_path}...[/dim]")
|
||||
console.print(f"[dim]Running setup script: {setup_script_path}...[/dim]")
|
||||
|
||||
# 스크립트 내용 읽기
|
||||
# Read script content
|
||||
script_content = script_path.read_text()
|
||||
|
||||
# 로컬 환경을 사용하여 ${VAR} 구문 확장
|
||||
# Expand ${VAR} syntax using local environment
|
||||
template = string.Template(script_content)
|
||||
expanded_script = template.safe_substitute(os.environ)
|
||||
|
||||
# 5분 타임아웃으로 샌드박스에서 실행
|
||||
# Execute in sandbox with 5-minute timeout
|
||||
result = backend.execute(f"bash -c {shlex.quote(expanded_script)}")
|
||||
|
||||
if result.exit_code != 0:
|
||||
console.print(f"[red]❌ 설정 스크립트 실패 (종료 코드 {result.exit_code}):[/red]")
|
||||
console.print(f"[red]❌ Setup script failed (exit {result.exit_code}):[/red]")
|
||||
console.print(f"[dim]{result.output}[/dim]")
|
||||
msg = "설정 실패 - 중단됨"
|
||||
msg = "Setup failed - aborting"
|
||||
raise RuntimeError(msg)
|
||||
|
||||
console.print("[green]✓ 설정 완료[/green]")
|
||||
console.print("[green]✓ Setup complete[/green]")
|
||||
|
||||
|
||||
@contextmanager
|
||||
def create_modal_sandbox(
|
||||
*, sandbox_id: str | None = None, setup_script_path: str | None = None
|
||||
) -> Generator[SandboxBackendProtocol, None, None]:
|
||||
"""Modal 샌드박스를 생성하거나 연결합니다.
|
||||
"""Create or connect to Modal sandbox.
|
||||
|
||||
Args:
|
||||
sandbox_id: 재사용할 기존 샌드박스 ID (선택 사항)
|
||||
setup_script_path: 샌드박스 시작 후 실행할 설정 스크립트 경로 (선택 사항)
|
||||
sandbox_id: Optional existing sandbox ID to reuse
|
||||
setup_script_path: Optional path to setup script to run after sandbox starts
|
||||
|
||||
Yields:
|
||||
(ModalBackend, sandbox_id)
|
||||
|
||||
Raises:
|
||||
ImportError: Modal SDK가 설치되지 않음
|
||||
Exception: 샌드박스 생성/연결 실패
|
||||
FileNotFoundError: 설정 스크립트를 찾을 수 없음
|
||||
RuntimeError: 설정 스크립트 실패
|
||||
ImportError: Modal SDK not installed
|
||||
Exception: Sandbox creation/connection failed
|
||||
FileNotFoundError: Setup script not found
|
||||
RuntimeError: Setup script failed
|
||||
"""
|
||||
import modal
|
||||
|
||||
from deepagents_cli.integrations.modal import ModalBackend
|
||||
|
||||
console.print("[yellow]Modal 샌드박스 시작 중...[/yellow]")
|
||||
console.print("[yellow]Starting Modal sandbox...[/yellow]")
|
||||
|
||||
# 임시 앱 생성 (종료 시 자동 정리)
|
||||
# Create ephemeral app (auto-cleans up on exit)
|
||||
app = modal.App("deepagents-sandbox")
|
||||
|
||||
with app.run():
|
||||
@@ -82,12 +82,12 @@ def create_modal_sandbox(
|
||||
sandbox = modal.Sandbox.create(app=app, workdir="/workspace")
|
||||
should_cleanup = True
|
||||
|
||||
# 실행될 때까지 폴링 (Modal에서 필요)
|
||||
for _ in range(90): # 180초 타임아웃 (90 * 2초)
|
||||
if sandbox.poll() is not None: # 샌드박스가 예기치 않게 종료됨
|
||||
msg = "시작 중 Modal 샌드박스가 예기치 않게 종료되었습니다"
|
||||
# Poll until running (Modal requires this)
|
||||
for _ in range(90): # 180s timeout (90 * 2s)
|
||||
if sandbox.poll() is not None: # Sandbox terminated unexpectedly
|
||||
msg = "Modal sandbox terminated unexpectedly during startup"
|
||||
raise RuntimeError(msg)
|
||||
# 간단한 명령을 시도하여 샌드박스가 준비되었는지 확인
|
||||
# Check if sandbox is ready by attempting a simple command
|
||||
try:
|
||||
process = sandbox.exec("echo", "ready", timeout=5)
|
||||
process.wait()
|
||||
@@ -97,15 +97,15 @@ def create_modal_sandbox(
|
||||
pass
|
||||
time.sleep(2)
|
||||
else:
|
||||
# 타임아웃 - 정리 및 실패 처리
|
||||
# Timeout - cleanup and fail
|
||||
sandbox.terminate()
|
||||
msg = "180초 이내에 Modal 샌드박스를 시작하지 못했습니다"
|
||||
msg = "Modal sandbox failed to start within 180 seconds"
|
||||
raise RuntimeError(msg)
|
||||
|
||||
backend = ModalBackend(sandbox)
|
||||
console.print(f"[green]✓ Modal 샌드박스 준비 완료: {backend.id}[/green]")
|
||||
console.print(f"[green]✓ Modal sandbox ready: {backend.id}[/green]")
|
||||
|
||||
# 설정 스크립트가 제공된 경우 실행
|
||||
# Run setup script if provided
|
||||
if setup_script_path:
|
||||
_run_sandbox_setup(backend, setup_script_path)
|
||||
try:
|
||||
@@ -113,32 +113,32 @@ def create_modal_sandbox(
|
||||
finally:
|
||||
if should_cleanup:
|
||||
try:
|
||||
console.print(f"[dim]Modal 샌드박스 {sandbox_id} 종료 중...[/dim]")
|
||||
console.print(f"[dim]Terminating Modal sandbox {sandbox_id}...[/dim]")
|
||||
sandbox.terminate()
|
||||
console.print(f"[dim]✓ Modal 샌드박스 {sandbox_id} 종료됨[/dim]")
|
||||
console.print(f"[dim]✓ Modal sandbox {sandbox_id} terminated[/dim]")
|
||||
except Exception as e:
|
||||
console.print(f"[yellow]⚠ 정리 실패: {e}[/yellow]")
|
||||
console.print(f"[yellow]⚠ Cleanup failed: {e}[/yellow]")
|
||||
|
||||
|
||||
@contextmanager
|
||||
def create_runloop_sandbox(
|
||||
*, sandbox_id: str | None = None, setup_script_path: str | None = None
|
||||
) -> Generator[SandboxBackendProtocol, None, None]:
|
||||
"""Runloop devbox를 생성하거나 연결합니다.
|
||||
"""Create or connect to Runloop devbox.
|
||||
|
||||
Args:
|
||||
sandbox_id: 재사용할 기존 devbox ID (선택 사항)
|
||||
setup_script_path: 샌드박스 시작 후 실행할 설정 스크립트 경로 (선택 사항)
|
||||
sandbox_id: Optional existing devbox ID to reuse
|
||||
setup_script_path: Optional path to setup script to run after sandbox starts
|
||||
|
||||
Yields:
|
||||
(RunloopBackend, devbox_id)
|
||||
|
||||
Raises:
|
||||
ImportError: Runloop SDK가 설치되지 않음
|
||||
ValueError: RUNLOOP_API_KEY가 설정되지 않음
|
||||
RuntimeError: 타임아웃 내에 devbox를 시작하지 못함
|
||||
FileNotFoundError: 설정 스크립트를 찾을 수 없음
|
||||
RuntimeError: 설정 스크립트 실패
|
||||
ImportError: Runloop SDK not installed
|
||||
ValueError: RUNLOOP_API_KEY not set
|
||||
RuntimeError: Devbox failed to start within timeout
|
||||
FileNotFoundError: Setup script not found
|
||||
RuntimeError: Setup script failed
|
||||
"""
|
||||
from runloop_api_client import Runloop
|
||||
|
||||
@@ -146,12 +146,12 @@ def create_runloop_sandbox(
|
||||
|
||||
bearer_token = os.environ.get("RUNLOOP_API_KEY")
|
||||
if not bearer_token:
|
||||
msg = "RUNLOOP_API_KEY 환경 변수가 설정되지 않았습니다"
|
||||
msg = "RUNLOOP_API_KEY environment variable not set"
|
||||
raise ValueError(msg)
|
||||
|
||||
client = Runloop(bearer_token=bearer_token)
|
||||
|
||||
console.print("[yellow]Runloop devbox 시작 중...[/yellow]")
|
||||
console.print("[yellow]Starting Runloop devbox...[/yellow]")
|
||||
|
||||
if sandbox_id:
|
||||
devbox = client.devboxes.retrieve(id=sandbox_id)
|
||||
@@ -161,23 +161,23 @@ def create_runloop_sandbox(
|
||||
sandbox_id = devbox.id
|
||||
should_cleanup = True
|
||||
|
||||
# 실행될 때까지 폴링 (Runloop에서 필요)
|
||||
for _ in range(90): # 180초 타임아웃 (90 * 2초)
|
||||
# Poll until running (Runloop requires this)
|
||||
for _ in range(90): # 180s timeout (90 * 2s)
|
||||
status = client.devboxes.retrieve(id=devbox.id)
|
||||
if status.status == "running":
|
||||
break
|
||||
time.sleep(2)
|
||||
else:
|
||||
# 타임아웃 - 정리 및 실패 처리
|
||||
# Timeout - cleanup and fail
|
||||
client.devboxes.shutdown(id=devbox.id)
|
||||
msg = "180초 이내에 devbox를 시작하지 못했습니다"
|
||||
msg = "Devbox failed to start within 180 seconds"
|
||||
raise RuntimeError(msg)
|
||||
|
||||
console.print(f"[green]✓ Runloop devbox 준비 완료: {sandbox_id}[/green]")
|
||||
console.print(f"[green]✓ Runloop devbox ready: {sandbox_id}[/green]")
|
||||
|
||||
backend = RunloopBackend(devbox_id=devbox.id, client=client)
|
||||
|
||||
# 설정 스크립트가 제공된 경우 실행
|
||||
# Run setup script if provided
|
||||
if setup_script_path:
|
||||
_run_sandbox_setup(backend, setup_script_path)
|
||||
try:
|
||||
@@ -185,29 +185,29 @@ def create_runloop_sandbox(
|
||||
finally:
|
||||
if should_cleanup:
|
||||
try:
|
||||
console.print(f"[dim]Runloop devbox {sandbox_id} 종료 중...[/dim]")
|
||||
console.print(f"[dim]Shutting down Runloop devbox {sandbox_id}...[/dim]")
|
||||
client.devboxes.shutdown(id=devbox.id)
|
||||
console.print(f"[dim]✓ Runloop devbox {sandbox_id} 종료됨[/dim]")
|
||||
console.print(f"[dim]✓ Runloop devbox {sandbox_id} terminated[/dim]")
|
||||
except Exception as e:
|
||||
console.print(f"[yellow]⚠ 정리 실패: {e}[/yellow]")
|
||||
console.print(f"[yellow]⚠ Cleanup failed: {e}[/yellow]")
|
||||
|
||||
|
||||
@contextmanager
|
||||
def create_daytona_sandbox(
|
||||
*, sandbox_id: str | None = None, setup_script_path: str | None = None
|
||||
) -> Generator[SandboxBackendProtocol, None, None]:
|
||||
"""Daytona 샌드박스를 생성합니다.
|
||||
"""Create Daytona sandbox.
|
||||
|
||||
Args:
|
||||
sandbox_id: 재사용할 기존 샌드박스 ID (선택 사항)
|
||||
setup_script_path: 샌드박스 시작 후 실행할 설정 스크립트 경로 (선택 사항)
|
||||
sandbox_id: Optional existing sandbox ID to reuse
|
||||
setup_script_path: Optional path to setup script to run after sandbox starts
|
||||
|
||||
Yields:
|
||||
(DaytonaBackend, sandbox_id)
|
||||
|
||||
Note:
|
||||
ID로 기존 Daytona 샌드박스에 연결하는 기능은 아직 지원되지 않을 수 있습니다.
|
||||
sandbox_id가 제공되면 NotImplementedError가 발생합니다.
|
||||
Connecting to existing Daytona sandbox by ID may not be supported yet.
|
||||
If sandbox_id is provided, this will raise NotImplementedError.
|
||||
"""
|
||||
from daytona import Daytona, DaytonaConfig
|
||||
|
||||
@@ -215,25 +215,25 @@ def create_daytona_sandbox(
|
||||
|
||||
api_key = os.environ.get("DAYTONA_API_KEY")
|
||||
if not api_key:
|
||||
msg = "DAYTONA_API_KEY 환경 변수가 설정되지 않았습니다"
|
||||
msg = "DAYTONA_API_KEY environment variable not set"
|
||||
raise ValueError(msg)
|
||||
|
||||
if sandbox_id:
|
||||
msg = (
|
||||
"ID로 기존 Daytona 샌드박스에 연결하는 기능은 아직 지원되지 않습니다. "
|
||||
"--sandbox-id를 생략하여 새 샌드박스를 생성하십시오."
|
||||
"Connecting to existing Daytona sandbox by ID not yet supported. "
|
||||
"Create a new sandbox by omitting --sandbox-id."
|
||||
)
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
console.print("[yellow]Daytona 샌드박스 시작 중...[/yellow]")
|
||||
console.print("[yellow]Starting Daytona sandbox...[/yellow]")
|
||||
|
||||
daytona = Daytona(DaytonaConfig(api_key=api_key))
|
||||
sandbox = daytona.create()
|
||||
sandbox_id = sandbox.id
|
||||
|
||||
# 실행될 때까지 폴링 (Daytona에서 필요)
|
||||
for _ in range(90): # 180초 타임아웃 (90 * 2초)
|
||||
# 간단한 명령을 시도하여 샌드박스가 준비되었는지 확인
|
||||
# Poll until running (Daytona requires this)
|
||||
for _ in range(90): # 180s timeout (90 * 2s)
|
||||
# Check if sandbox is ready by attempting a simple command
|
||||
try:
|
||||
result = sandbox.process.exec("echo ready", timeout=5)
|
||||
if result.exit_code == 0:
|
||||
@@ -243,30 +243,29 @@ def create_daytona_sandbox(
|
||||
time.sleep(2)
|
||||
else:
|
||||
try:
|
||||
# 가능한 경우 정리
|
||||
# Clean up if possible
|
||||
sandbox.delete()
|
||||
finally:
|
||||
msg = "180초 이내에 Daytona 샌드박스를 시작하지 못했습니다"
|
||||
msg = "Daytona sandbox failed to start within 180 seconds"
|
||||
raise RuntimeError(msg)
|
||||
|
||||
backend = DaytonaBackend(sandbox)
|
||||
console.print(f"[green]✓ Daytona 샌드박스 준비 완료: {backend.id}[/green]")
|
||||
console.print(f"[green]✓ Daytona sandbox ready: {backend.id}[/green]")
|
||||
|
||||
# 설정 스크립트가 제공된 경우 실행
|
||||
# Run setup script if provided
|
||||
if setup_script_path:
|
||||
_run_sandbox_setup(backend, setup_script_path)
|
||||
try:
|
||||
yield backend
|
||||
finally:
|
||||
console.print(f"[dim]Daytona 샌드박스 {sandbox_id} 삭제 중...[/dim]")
|
||||
console.print(f"[dim]Deleting Daytona sandbox {sandbox_id}...[/dim]")
|
||||
try:
|
||||
sandbox.delete()
|
||||
console.print(f"[dim]✓ Daytona 샌드박스 {sandbox_id} 종료됨[/dim]")
|
||||
console.print(f"[dim]✓ Daytona sandbox {sandbox_id} terminated[/dim]")
|
||||
except Exception as e:
|
||||
console.print(f"[yellow]⚠ 정리 실패: {e}[/yellow]")
|
||||
console.print(f"[yellow]⚠ Cleanup failed: {e}[/yellow]")
|
||||
|
||||
|
||||
# 공급자별 작업 디렉토리 매핑
|
||||
_PROVIDER_TO_WORKING_DIR = {
|
||||
"modal": "/workspace",
|
||||
"runloop": "/home/user",
|
||||
@@ -274,7 +273,7 @@ _PROVIDER_TO_WORKING_DIR = {
|
||||
}
|
||||
|
||||
|
||||
# 샌드박스 유형과 해당 컨텍스트 매니저 팩토리 매핑
|
||||
# Mapping of sandbox types to their context manager factories
|
||||
_SANDBOX_PROVIDERS = {
|
||||
"modal": create_modal_sandbox,
|
||||
"runloop": create_runloop_sandbox,
|
||||
@@ -289,20 +288,24 @@ def create_sandbox(
|
||||
sandbox_id: str | None = None,
|
||||
setup_script_path: str | None = None,
|
||||
) -> Generator[SandboxBackendProtocol, None, None]:
|
||||
"""지정된 공급자의 샌드박스를 생성하거나 연결합니다.
|
||||
"""Create or connect to a sandbox of the specified provider.
|
||||
|
||||
이것은 적절한 공급자별 컨텍스트 매니저에 위임하는 샌드박스 생성을 위한 통합 인터페이스입니다.
|
||||
This is the unified interface for sandbox creation that delegates to
|
||||
the appropriate provider-specific context manager.
|
||||
|
||||
Args:
|
||||
provider: 샌드박스 공급자 ("modal", "runloop", "daytona")
|
||||
sandbox_id: 재사용할 기존 샌드박스 ID (선택 사항)
|
||||
setup_script_path: 샌드박스 시작 후 실행할 설정 스크립트 경로 (선택 사항)
|
||||
provider: Sandbox provider ("modal", "runloop", "daytona")
|
||||
sandbox_id: Optional existing sandbox ID to reuse
|
||||
setup_script_path: Optional path to setup script to run after sandbox starts
|
||||
|
||||
Yields:
|
||||
(SandboxBackend, sandbox_id)
|
||||
"""
|
||||
if provider not in _SANDBOX_PROVIDERS:
|
||||
msg = f"알 수 없는 샌드박스 공급자: {provider}. 사용 가능한 공급자: {', '.join(get_available_sandbox_types())}"
|
||||
msg = (
|
||||
f"Unknown sandbox provider: {provider}. "
|
||||
f"Available providers: {', '.join(get_available_sandbox_types())}"
|
||||
)
|
||||
raise ValueError(msg)
|
||||
|
||||
sandbox_provider = _SANDBOX_PROVIDERS[provider]
|
||||
@@ -312,29 +315,29 @@ def create_sandbox(
|
||||
|
||||
|
||||
def get_available_sandbox_types() -> list[str]:
|
||||
"""사용 가능한 샌드박스 공급자 유형 목록을 가져옵니다.
|
||||
"""Get list of available sandbox provider types.
|
||||
|
||||
Returns:
|
||||
샌드박스 유형 이름 목록 (예: ["modal", "runloop", "daytona"])
|
||||
List of sandbox type names (e.g., ["modal", "runloop", "daytona"])
|
||||
"""
|
||||
return list(_SANDBOX_PROVIDERS.keys())
|
||||
|
||||
|
||||
def get_default_working_dir(provider: str) -> str:
|
||||
"""주어진 샌드박스 공급자의 기본 작업 디렉토리를 가져옵니다.
|
||||
"""Get the default working directory for a given sandbox provider.
|
||||
|
||||
Args:
|
||||
provider: 샌드박스 공급자 이름 ("modal", "runloop", "daytona")
|
||||
provider: Sandbox provider name ("modal", "runloop", "daytona")
|
||||
|
||||
Returns:
|
||||
기본 작업 디렉토리 경로 (문자열)
|
||||
Default working directory path as string
|
||||
|
||||
Raises:
|
||||
ValueError: 공급자를 알 수 없는 경우
|
||||
ValueError: If provider is unknown
|
||||
"""
|
||||
if provider in _PROVIDER_TO_WORKING_DIR:
|
||||
return _PROVIDER_TO_WORKING_DIR[provider]
|
||||
msg = f"알 수 없는 샌드박스 공급자: {provider}"
|
||||
msg = f"Unknown sandbox provider: {provider}"
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
|
||||
@@ -1,425 +1,244 @@
|
||||
"""DeepAgents를 위한 메인 진입점 및 CLI 루프."""
|
||||
"""Main entry point and CLI loop for deepagents."""
|
||||
# ruff: noqa: T201
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import contextlib
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from deepagents.backends.protocol import SandboxBackendProtocol
|
||||
|
||||
# Now safe to import agent (which imports LangChain modules)
|
||||
from deepagents_cli.agent import create_cli_agent, list_agents, reset_agent
|
||||
from deepagents_cli.commands import execute_bash_command, handle_command
|
||||
|
||||
# CRITICAL: Import config FIRST to set LANGSMITH_PROJECT before LangChain loads
|
||||
from deepagents_cli.config import (
|
||||
COLORS,
|
||||
DEEP_AGENTS_ASCII,
|
||||
SessionState,
|
||||
console,
|
||||
create_model,
|
||||
settings,
|
||||
)
|
||||
from deepagents_cli.execution import execute_task
|
||||
from deepagents_cli.input import ImageTracker, create_prompt_session
|
||||
from deepagents_cli.integrations.sandbox_factory import (
|
||||
create_sandbox,
|
||||
get_default_working_dir,
|
||||
from deepagents_cli.integrations.sandbox_factory import create_sandbox
|
||||
from deepagents_cli.sessions import (
|
||||
delete_thread_command,
|
||||
generate_thread_id,
|
||||
get_checkpointer,
|
||||
get_most_recent,
|
||||
get_thread_agent,
|
||||
list_threads_command,
|
||||
thread_exists,
|
||||
)
|
||||
from deepagents_cli.skills import execute_skills_command, setup_skills_parser
|
||||
from deepagents_cli.tools import fetch_url, http_request, web_search
|
||||
from deepagents_cli.ui import TokenTracker, show_help
|
||||
from deepagents_cli.ui import show_help
|
||||
|
||||
|
||||
def check_cli_dependencies() -> None:
|
||||
"""CLI 선택적 종속성이 설치되어 있는지 확인합니다."""
|
||||
"""Check if CLI optional dependencies are installed."""
|
||||
missing = []
|
||||
|
||||
try:
|
||||
import rich
|
||||
except ImportError:
|
||||
missing.append("rich")
|
||||
|
||||
try:
|
||||
import requests
|
||||
import requests # noqa: F401
|
||||
except ImportError:
|
||||
missing.append("requests")
|
||||
|
||||
try:
|
||||
import dotenv
|
||||
import dotenv # noqa: F401
|
||||
except ImportError:
|
||||
missing.append("python-dotenv")
|
||||
|
||||
try:
|
||||
import tavily
|
||||
import tavily # noqa: F401
|
||||
except ImportError:
|
||||
missing.append("tavily-python")
|
||||
|
||||
try:
|
||||
import prompt_toolkit
|
||||
import textual # noqa: F401
|
||||
except ImportError:
|
||||
missing.append("prompt-toolkit")
|
||||
missing.append("textual")
|
||||
|
||||
if missing:
|
||||
print("\n❌ 필수 CLI 종속성이 누락되었습니다!")
|
||||
print("\nDeepAgents CLI를 사용하려면 다음 패키지가 필요합니다:")
|
||||
print("\n❌ Missing required CLI dependencies!")
|
||||
print("\nThe following packages are required to use the deepagents CLI:")
|
||||
for pkg in missing:
|
||||
print(f" - {pkg}")
|
||||
print("\n다음 명령으로 설치하십시오:")
|
||||
print("\nPlease install them with:")
|
||||
print(" pip install deepagents[cli]")
|
||||
print("\n또는 모든 종속성을 설치하십시오:")
|
||||
print("\nOr install all dependencies:")
|
||||
print(" pip install 'deepagents[cli]'")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def parse_args():
|
||||
"""명령줄 인수를 파싱합니다."""
|
||||
def parse_args() -> argparse.Namespace:
|
||||
"""Parse command line arguments."""
|
||||
parser = argparse.ArgumentParser(
|
||||
description="DeepAgents - AI 코딩 도우미",
|
||||
description="DeepAgents - AI Coding Assistant",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
add_help=False,
|
||||
)
|
||||
|
||||
subparsers = parser.add_subparsers(dest="command", help="실행할 명령")
|
||||
subparsers = parser.add_subparsers(dest="command", help="Command to run")
|
||||
|
||||
# List command
|
||||
subparsers.add_parser("list", help="사용 가능한 모든 에이전트 나열")
|
||||
subparsers.add_parser("list", help="List all available agents")
|
||||
|
||||
# Help command
|
||||
subparsers.add_parser("help", help="도움말 정보 표시")
|
||||
subparsers.add_parser("help", help="Show help information")
|
||||
|
||||
# Reset command
|
||||
reset_parser = subparsers.add_parser("reset", help="에이전트 초기화")
|
||||
reset_parser.add_argument("--agent", required=True, help="초기화할 에이전트 이름")
|
||||
reset_parser.add_argument("--target", dest="source_agent", help="다른 에이전트에서 프롬프트 복사")
|
||||
reset_parser = subparsers.add_parser("reset", help="Reset an agent")
|
||||
reset_parser.add_argument("--agent", required=True, help="Name of agent to reset")
|
||||
reset_parser.add_argument(
|
||||
"--target", dest="source_agent", help="Copy prompt from another agent"
|
||||
)
|
||||
|
||||
# Skills command - setup delegated to skills module
|
||||
setup_skills_parser(subparsers)
|
||||
|
||||
# Threads command
|
||||
threads_parser = subparsers.add_parser("threads", help="Manage conversation threads")
|
||||
threads_sub = threads_parser.add_subparsers(dest="threads_command")
|
||||
|
||||
# threads list
|
||||
threads_list = threads_sub.add_parser("list", help="List threads")
|
||||
threads_list.add_argument(
|
||||
"--agent", default=None, help="Filter by agent name (default: show all)"
|
||||
)
|
||||
threads_list.add_argument("--limit", type=int, default=20, help="Max threads (default: 20)")
|
||||
|
||||
# threads delete
|
||||
threads_delete = threads_sub.add_parser("delete", help="Delete a thread")
|
||||
threads_delete.add_argument("thread_id", help="Thread ID to delete")
|
||||
|
||||
# Default interactive mode
|
||||
parser.add_argument(
|
||||
"--agent",
|
||||
default="agent",
|
||||
help="별도의 메모리 저장소를 위한 에이전트 식별자 (기본값: agent).",
|
||||
help="Agent identifier for separate memory stores (default: agent).",
|
||||
)
|
||||
|
||||
# Thread resume argument - matches PR #638: -r for most recent, -r <ID> for specific
|
||||
parser.add_argument(
|
||||
"-r",
|
||||
"--resume",
|
||||
dest="resume_thread",
|
||||
nargs="?",
|
||||
const="__MOST_RECENT__",
|
||||
default=None,
|
||||
help="Resume thread: -r for most recent, -r <ID> for specific thread",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--model",
|
||||
help="사용할 모델 (예: claude-sonnet-4-5-20250929, gpt-5-mini, gemini-3-pro-preview). 모델 이름에서 공급자가 자동 감지됩니다.",
|
||||
help="Model to use (e.g., claude-sonnet-4-5-20250929, gpt-5-mini). "
|
||||
"Provider is auto-detected from model name.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--auto-approve",
|
||||
action="store_true",
|
||||
help="프롬프트 없이 도구 사용 자동 승인 (human-in-the-loop 비활성화)",
|
||||
help="Auto-approve tool usage without prompting (disables human-in-the-loop)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sandbox",
|
||||
choices=["none", "modal", "daytona", "runloop"],
|
||||
default="none",
|
||||
help="코드 실행을 위한 원격 샌드박스 (기본값: none - 로컬 전용)",
|
||||
help="Remote sandbox for code execution (default: none - local only)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sandbox-id",
|
||||
help="재사용할 기존 샌드박스 ID (생성 및 정리 건너뜀)",
|
||||
help="Existing sandbox ID to reuse (skips creation and cleanup)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sandbox-setup",
|
||||
help="생성 후 샌드박스에서 실행할 설정 스크립트 경로",
|
||||
help="Path to setup script to run in sandbox after creation",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no-splash",
|
||||
action="store_true",
|
||||
help="시작 스플래시 화면 비활성화",
|
||||
)
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
async def simple_cli(
|
||||
agent,
|
||||
assistant_id: str | None,
|
||||
session_state,
|
||||
baseline_tokens: int = 0,
|
||||
backend=None,
|
||||
sandbox_type: str | None = None,
|
||||
setup_script_path: str | None = None,
|
||||
no_splash: bool = False,
|
||||
) -> None:
|
||||
"""메인 CLI 루프.
|
||||
|
||||
Args:
|
||||
backend: 파일 작업을 위한 백엔드 (CompositeBackend)
|
||||
sandbox_type: 사용 중인 샌드박스 유형 (예: "modal", "runloop", "daytona").
|
||||
None인 경우 로컬 모드에서 실행.
|
||||
sandbox_id: 활성 샌드박스의 ID
|
||||
setup_script_path: 실행된 설정 스크립트 경로 (있는 경우)
|
||||
no_splash: True인 경우 시작 스플래시 화면 표시 건너뜀
|
||||
"""
|
||||
console.clear()
|
||||
if not no_splash:
|
||||
console.print(DEEP_AGENTS_ASCII, style=f"bold {COLORS['primary']}")
|
||||
console.print()
|
||||
|
||||
# Extract sandbox ID from backend if using sandbox mode
|
||||
sandbox_id: str | None = None
|
||||
if backend:
|
||||
from deepagents.backends.composite import CompositeBackend
|
||||
|
||||
# Check if it's a CompositeBackend with a sandbox default backend
|
||||
if isinstance(backend, CompositeBackend):
|
||||
if isinstance(backend.default, SandboxBackendProtocol):
|
||||
sandbox_id = backend.default.id
|
||||
elif isinstance(backend, SandboxBackendProtocol):
|
||||
sandbox_id = backend.id
|
||||
|
||||
# Display sandbox info persistently (survives console.clear())
|
||||
if sandbox_type and sandbox_id:
|
||||
console.print(f"[yellow]⚡ {sandbox_type.capitalize()} 샌드박스: {sandbox_id}[/yellow]")
|
||||
if setup_script_path:
|
||||
console.print(f"[green]✓ 설정 스크립트 ({setup_script_path}) 완료됨[/green]")
|
||||
console.print()
|
||||
|
||||
# Display model info
|
||||
if settings.model_name and settings.model_provider:
|
||||
provider_display = {
|
||||
"openai": "OpenAI",
|
||||
"anthropic": "Anthropic",
|
||||
"google": "Google",
|
||||
}.get(settings.model_provider, settings.model_provider)
|
||||
console.print(
|
||||
f"[green]✓ Model:[/green] {provider_display} → '{settings.model_name}'",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
console.print()
|
||||
|
||||
if not settings.has_tavily:
|
||||
console.print(
|
||||
"[yellow]⚠ 웹 검색 비활성화됨:[/yellow] TAVILY_API_KEY를 찾을 수 없습니다.",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
console.print(" 웹 검색을 활성화하려면 Tavily API 키를 설정하세요:", style=COLORS["dim"])
|
||||
console.print(" export TAVILY_API_KEY=your_api_key_here", style=COLORS["dim"])
|
||||
console.print(
|
||||
" 또는 .env 파일에 추가하세요. 키 발급: https://tavily.com",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
console.print()
|
||||
|
||||
if settings.has_deepagents_langchain_project:
|
||||
console.print(
|
||||
f"[green]✓ LangSmith 추적 활성화됨:[/green] Deepagents → '{settings.deepagents_langchain_project}'",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
if settings.user_langchain_project:
|
||||
console.print(f" [dim]사용자 코드 (shell) → '{settings.user_langchain_project}'[/dim]")
|
||||
console.print()
|
||||
|
||||
console.print("... 코딩 준비 완료! 무엇을 만들고 싶으신가요?", style=COLORS["agent"])
|
||||
|
||||
if sandbox_type:
|
||||
working_dir = get_default_working_dir(sandbox_type)
|
||||
console.print(f" [dim]로컬 CLI 디렉터리: {Path.cwd()}[/dim]")
|
||||
console.print(f" [dim]코드 실행: 원격 샌드박스 ({working_dir})[/dim]")
|
||||
else:
|
||||
console.print(f" [dim]작업 디렉터리: {Path.cwd()}[/dim]")
|
||||
|
||||
console.print()
|
||||
|
||||
if session_state.auto_approve:
|
||||
console.print(" [yellow]⚡ 자동 승인: 켜짐[/yellow] [dim](확인 없이 도구 실행)[/dim]")
|
||||
console.print()
|
||||
|
||||
# Localize modifier names and show key symbols (macOS vs others)
|
||||
if sys.platform == "darwin":
|
||||
tips = (
|
||||
" 팁: ⏎ Enter로 제출, ⌥ Option + ⏎ Enter로 줄바꿈 (또는 Esc+Enter), "
|
||||
"⌃E로 편집기 열기, ⌃T로 자동 승인 전환, ⌃C로 중단"
|
||||
)
|
||||
else:
|
||||
tips = (
|
||||
" 팁: Enter로 제출, Alt+Enter (또는 Esc+Enter)로 줄바꿈, "
|
||||
"Ctrl+E로 편집기 열기, Ctrl+T로 자동 승인 전환, Ctrl+C로 중단"
|
||||
)
|
||||
console.print(tips, style=f"dim {COLORS['dim']}")
|
||||
|
||||
console.print()
|
||||
|
||||
# Create prompt session, image tracker, and token tracker
|
||||
image_tracker = ImageTracker()
|
||||
session = create_prompt_session(assistant_id, session_state, image_tracker=image_tracker)
|
||||
token_tracker = TokenTracker()
|
||||
token_tracker.set_baseline(baseline_tokens)
|
||||
|
||||
while True:
|
||||
try:
|
||||
user_input = await session.prompt_async()
|
||||
if session_state.exit_hint_handle:
|
||||
session_state.exit_hint_handle.cancel()
|
||||
session_state.exit_hint_handle = None
|
||||
session_state.exit_hint_until = None
|
||||
user_input = user_input.strip()
|
||||
except EOFError:
|
||||
break
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n안녕히 가세요!", style=COLORS["primary"])
|
||||
break
|
||||
|
||||
if not user_input:
|
||||
continue
|
||||
|
||||
# Check for slash commands first
|
||||
if user_input.startswith("/"):
|
||||
result = handle_command(user_input, agent, token_tracker)
|
||||
if result == "exit":
|
||||
console.print("\n안녕히 가세요!", style=COLORS["primary"])
|
||||
break
|
||||
if result:
|
||||
# Command was handled, continue to next input
|
||||
continue
|
||||
|
||||
# Check for bash commands (!)
|
||||
if user_input.startswith("!"):
|
||||
execute_bash_command(user_input)
|
||||
continue
|
||||
|
||||
# Handle regular quit keywords
|
||||
if user_input.lower() in ["quit", "exit", "q"]:
|
||||
console.print("\n안녕히 가세요!", style=COLORS["primary"])
|
||||
break
|
||||
|
||||
await execute_task(
|
||||
user_input,
|
||||
agent,
|
||||
assistant_id,
|
||||
session_state,
|
||||
token_tracker,
|
||||
backend=backend,
|
||||
image_tracker=image_tracker,
|
||||
)
|
||||
|
||||
|
||||
async def _run_agent_session(
|
||||
model,
|
||||
async def run_textual_cli_async(
|
||||
assistant_id: str,
|
||||
session_state,
|
||||
sandbox_backend=None,
|
||||
sandbox_type: str | None = None,
|
||||
setup_script_path: str | None = None,
|
||||
) -> None:
|
||||
"""에이전트를 생성하고 CLI 세션을 실행하는 도우미.
|
||||
|
||||
샌드박스 모드와 로컬 모드 간의 중복을 피하기 위해 추출되었습니다.
|
||||
|
||||
Args:
|
||||
model: 사용할 LLM 모델
|
||||
assistant_id: 메모리 저장을 위한 에이전트 식별자
|
||||
session_state: 자동 승인 설정이 포함된 세션 상태
|
||||
sandbox_backend: 원격 실행을 위한 선택적 샌드박스 백엔드
|
||||
sandbox_type: 사용 중인 샌드박스 유형
|
||||
setup_script_path: 실행된 설정 스크립트 경로 (있는 경우)
|
||||
"""
|
||||
# Create agent with conditional tools
|
||||
tools = [http_request, fetch_url]
|
||||
if settings.has_tavily:
|
||||
tools.append(web_search)
|
||||
|
||||
agent, composite_backend = create_cli_agent(
|
||||
model=model,
|
||||
assistant_id=assistant_id,
|
||||
tools=tools,
|
||||
sandbox=sandbox_backend,
|
||||
sandbox_type=sandbox_type,
|
||||
auto_approve=session_state.auto_approve,
|
||||
)
|
||||
|
||||
# Calculate baseline token count for accurate token tracking
|
||||
from .agent import get_system_prompt
|
||||
from .token_utils import calculate_baseline_tokens
|
||||
|
||||
agent_dir = settings.get_agent_dir(assistant_id)
|
||||
system_prompt = get_system_prompt(assistant_id=assistant_id, sandbox_type=sandbox_type)
|
||||
baseline_tokens = calculate_baseline_tokens(model, agent_dir, system_prompt, assistant_id)
|
||||
|
||||
await simple_cli(
|
||||
agent,
|
||||
assistant_id,
|
||||
session_state,
|
||||
baseline_tokens,
|
||||
backend=composite_backend,
|
||||
sandbox_type=sandbox_type,
|
||||
setup_script_path=setup_script_path,
|
||||
no_splash=session_state.no_splash,
|
||||
)
|
||||
|
||||
|
||||
async def main(
|
||||
assistant_id: str,
|
||||
session_state,
|
||||
*,
|
||||
auto_approve: bool = False,
|
||||
sandbox_type: str = "none",
|
||||
sandbox_id: str | None = None,
|
||||
setup_script_path: str | None = None,
|
||||
model_name: str | None = None,
|
||||
thread_id: str | None = None,
|
||||
is_resumed: bool = False,
|
||||
) -> None:
|
||||
"""조건부 샌드박스 지원이 포함된 메인 진입점.
|
||||
"""Run the Textual CLI interface (async version).
|
||||
|
||||
Args:
|
||||
assistant_id: 메모리 저장을 위한 에이전트 식별자
|
||||
session_state: 자동 승인 설정이 포함된 세션 상태
|
||||
sandbox_type: 샌드박스 유형 ("none", "modal", "runloop", "daytona")
|
||||
sandbox_id: 재사용할 선택적 기존 샌드박스 ID
|
||||
setup_script_path: 샌드박스에서 실행할 선택적 설정 스크립트 경로
|
||||
model_name: 환경 변수 대신 사용할 선택적 모델 이름
|
||||
assistant_id: Agent identifier for memory storage
|
||||
auto_approve: Whether to auto-approve tool usage
|
||||
sandbox_type: Type of sandbox ("none", "modal", "runloop", "daytona")
|
||||
sandbox_id: Optional existing sandbox ID to reuse
|
||||
model_name: Optional model name to use
|
||||
thread_id: Thread ID to use (new or resumed)
|
||||
is_resumed: Whether this is a resumed session
|
||||
"""
|
||||
from deepagents_cli.app import run_textual_app
|
||||
|
||||
model = create_model(model_name)
|
||||
|
||||
# Branch 1: User wants a sandbox
|
||||
if sandbox_type != "none":
|
||||
# Try to create sandbox
|
||||
try:
|
||||
console.print()
|
||||
with create_sandbox(
|
||||
sandbox_type, sandbox_id=sandbox_id, setup_script_path=setup_script_path
|
||||
) as sandbox_backend:
|
||||
console.print(f"[yellow]⚡ 원격 실행 활성화됨 ({sandbox_type})[/yellow]")
|
||||
console.print()
|
||||
|
||||
await _run_agent_session(
|
||||
model,
|
||||
assistant_id,
|
||||
session_state,
|
||||
sandbox_backend,
|
||||
sandbox_type=sandbox_type,
|
||||
setup_script_path=setup_script_path,
|
||||
)
|
||||
except (ImportError, ValueError, RuntimeError, NotImplementedError) as e:
|
||||
# Sandbox creation failed - fail hard (no silent fallback)
|
||||
console.print()
|
||||
console.print("[red]❌ 샌드박스 생성 실패[/red]")
|
||||
console.print(f"[dim]{e}[/dim]")
|
||||
sys.exit(1)
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n\n[yellow]중단됨[/yellow]")
|
||||
sys.exit(0)
|
||||
except Exception as e:
|
||||
console.print(f"\n[bold red]❌ 오류:[/bold red] {e}\n")
|
||||
console.print_exception()
|
||||
sys.exit(1)
|
||||
|
||||
# Branch 2: User wants local mode (none or default)
|
||||
# Show thread info
|
||||
if is_resumed:
|
||||
console.print(f"[green]Resuming thread:[/green] {thread_id}")
|
||||
else:
|
||||
console.print(f"[dim]Thread: {thread_id}[/dim]")
|
||||
|
||||
# Use async context manager for checkpointer
|
||||
async with get_checkpointer() as checkpointer:
|
||||
# Create agent with conditional tools
|
||||
tools = [http_request, fetch_url]
|
||||
if settings.has_tavily:
|
||||
tools.append(web_search)
|
||||
|
||||
# Handle sandbox mode
|
||||
sandbox_backend = None
|
||||
sandbox_cm = None
|
||||
|
||||
if sandbox_type != "none":
|
||||
try:
|
||||
# Create sandbox context manager but keep it open
|
||||
sandbox_cm = create_sandbox(sandbox_type, sandbox_id=sandbox_id)
|
||||
sandbox_backend = sandbox_cm.__enter__()
|
||||
except (ImportError, ValueError, RuntimeError, NotImplementedError) as e:
|
||||
console.print()
|
||||
console.print("[red]❌ Sandbox creation failed[/red]")
|
||||
console.print(f"[dim]{e}[/dim]")
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
await _run_agent_session(model, assistant_id, session_state, sandbox_backend=None)
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n\n[yellow]중단됨[/yellow]")
|
||||
sys.exit(0)
|
||||
agent, composite_backend = create_cli_agent(
|
||||
model=model,
|
||||
assistant_id=assistant_id,
|
||||
tools=tools,
|
||||
sandbox=sandbox_backend,
|
||||
sandbox_type=sandbox_type if sandbox_type != "none" else None,
|
||||
auto_approve=auto_approve,
|
||||
checkpointer=checkpointer,
|
||||
)
|
||||
|
||||
# Run Textual app
|
||||
await run_textual_app(
|
||||
agent=agent,
|
||||
assistant_id=assistant_id,
|
||||
backend=composite_backend,
|
||||
auto_approve=auto_approve,
|
||||
cwd=Path.cwd(),
|
||||
thread_id=thread_id,
|
||||
)
|
||||
except Exception as e:
|
||||
console.print(f"\n[bold red]❌ 오류:[/bold red] {e}\n")
|
||||
console.print_exception()
|
||||
console.print(f"[red]❌ Failed to create agent: {e}[/red]")
|
||||
sys.exit(1)
|
||||
finally:
|
||||
# Clean up sandbox if we created one
|
||||
if sandbox_cm is not None:
|
||||
with contextlib.suppress(Exception):
|
||||
sandbox_cm.__exit__(None, None, None)
|
||||
|
||||
|
||||
def cli_main() -> None:
|
||||
"""콘솔 스크립트 진입점."""
|
||||
"""Entry point for console script."""
|
||||
# Fix for gRPC fork issue on macOS
|
||||
# https://github.com/grpc/grpc/issues/37642
|
||||
if sys.platform == "darwin":
|
||||
@@ -443,24 +262,76 @@ def cli_main() -> None:
|
||||
reset_agent(args.agent, args.source_agent)
|
||||
elif args.command == "skills":
|
||||
execute_skills_command(args)
|
||||
elif args.command == "threads":
|
||||
if args.threads_command == "list":
|
||||
asyncio.run(
|
||||
list_threads_command(
|
||||
agent_name=getattr(args, "agent", None),
|
||||
limit=getattr(args, "limit", 20),
|
||||
)
|
||||
)
|
||||
elif args.threads_command == "delete":
|
||||
asyncio.run(delete_thread_command(args.thread_id))
|
||||
else:
|
||||
console.print("[yellow]Usage: deepagents threads <list|delete>[/yellow]")
|
||||
else:
|
||||
# Create session state from args
|
||||
session_state = SessionState(auto_approve=args.auto_approve, no_splash=args.no_splash)
|
||||
# Interactive mode - handle thread resume
|
||||
thread_id = None
|
||||
is_resumed = False
|
||||
|
||||
# API key validation happens in create_model()
|
||||
if args.resume_thread == "__MOST_RECENT__":
|
||||
# -r (no ID): Get most recent thread
|
||||
# If --agent specified, filter by that agent; otherwise get most recent overall
|
||||
agent_filter = args.agent if args.agent != "agent" else None
|
||||
thread_id = asyncio.run(get_most_recent(agent_filter))
|
||||
if thread_id:
|
||||
is_resumed = True
|
||||
agent_name = asyncio.run(get_thread_agent(thread_id))
|
||||
if agent_name:
|
||||
args.agent = agent_name
|
||||
else:
|
||||
msg = (
|
||||
f"No previous thread for '{args.agent}'"
|
||||
if agent_filter
|
||||
else "No previous threads"
|
||||
)
|
||||
console.print(f"[yellow]{msg}, starting new.[/yellow]")
|
||||
|
||||
elif args.resume_thread:
|
||||
# -r <ID>: Resume specific thread
|
||||
if asyncio.run(thread_exists(args.resume_thread)):
|
||||
thread_id = args.resume_thread
|
||||
is_resumed = True
|
||||
if args.agent == "agent":
|
||||
agent_name = asyncio.run(get_thread_agent(thread_id))
|
||||
if agent_name:
|
||||
args.agent = agent_name
|
||||
else:
|
||||
console.print(f"[red]Thread '{args.resume_thread}' not found.[/red]")
|
||||
console.print(
|
||||
"[dim]Use 'deepagents threads list' to see available threads.[/dim]"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
# Generate new thread ID if not resuming
|
||||
if thread_id is None:
|
||||
thread_id = generate_thread_id()
|
||||
|
||||
# Run Textual CLI
|
||||
asyncio.run(
|
||||
main(
|
||||
args.agent,
|
||||
session_state,
|
||||
args.sandbox,
|
||||
args.sandbox_id,
|
||||
args.sandbox_setup,
|
||||
getattr(args, "model", None),
|
||||
run_textual_cli_async(
|
||||
assistant_id=args.agent,
|
||||
auto_approve=args.auto_approve,
|
||||
sandbox_type=args.sandbox,
|
||||
sandbox_id=args.sandbox_id,
|
||||
model_name=getattr(args, "model", None),
|
||||
thread_id=thread_id,
|
||||
is_resumed=is_resumed,
|
||||
)
|
||||
)
|
||||
except KeyboardInterrupt:
|
||||
# Clean exit on Ctrl+C - suppress ugly traceback
|
||||
console.print("\n\n[yellow]중단됨[/yellow]")
|
||||
console.print("\n\n[yellow]Interrupted[/yellow]")
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,214 @@
|
||||
"""Thread management using LangGraph's built-in checkpoint persistence."""
|
||||
|
||||
import uuid
|
||||
from collections.abc import AsyncIterator
|
||||
from contextlib import asynccontextmanager
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
import aiosqlite
|
||||
from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
|
||||
from rich.table import Table
|
||||
|
||||
from deepagents_cli.config import COLORS, console
|
||||
|
||||
# Patch aiosqlite.Connection to add is_alive() method required by langgraph-checkpoint>=2.1.0
|
||||
# See: https://github.com/langchain-ai/langgraph/issues/6583
|
||||
if not hasattr(aiosqlite.Connection, "is_alive"):
|
||||
|
||||
def _is_alive(self: aiosqlite.Connection) -> bool:
|
||||
"""Check if the connection is still alive."""
|
||||
return self._connection is not None
|
||||
|
||||
aiosqlite.Connection.is_alive = _is_alive
|
||||
|
||||
|
||||
def _format_timestamp(iso_timestamp: str | None) -> str:
|
||||
"""Format ISO timestamp for display (e.g., 'Dec 30, 6:10pm')."""
|
||||
if not iso_timestamp:
|
||||
return ""
|
||||
try:
|
||||
dt = datetime.fromisoformat(iso_timestamp).astimezone()
|
||||
return dt.strftime("%b %d, %-I:%M%p").lower().replace("am", "am").replace("pm", "pm")
|
||||
except (ValueError, TypeError):
|
||||
return ""
|
||||
|
||||
|
||||
def get_db_path() -> Path:
|
||||
"""Get path to global database."""
|
||||
db_dir = Path.home() / ".deepagents"
|
||||
db_dir.mkdir(parents=True, exist_ok=True)
|
||||
return db_dir / "sessions.db"
|
||||
|
||||
|
||||
def generate_thread_id() -> str:
|
||||
"""Generate a new 8-char hex thread ID."""
|
||||
return uuid.uuid4().hex[:8]
|
||||
|
||||
|
||||
async def _table_exists(conn: aiosqlite.Connection, table: str) -> bool:
|
||||
"""Check if a table exists in the database."""
|
||||
query = "SELECT 1 FROM sqlite_master WHERE type='table' AND name=?"
|
||||
async with conn.execute(query, (table,)) as cursor:
|
||||
return await cursor.fetchone() is not None
|
||||
|
||||
|
||||
async def list_threads(
|
||||
agent_name: str | None = None,
|
||||
limit: int = 20,
|
||||
) -> list[dict]:
|
||||
"""List threads from checkpoints table."""
|
||||
db_path = str(get_db_path())
|
||||
async with aiosqlite.connect(db_path, timeout=30.0) as conn:
|
||||
# Return empty if table doesn't exist yet (fresh install)
|
||||
if not await _table_exists(conn, "checkpoints"):
|
||||
return []
|
||||
|
||||
if agent_name:
|
||||
query = """
|
||||
SELECT thread_id,
|
||||
json_extract(metadata, '$.agent_name') as agent_name,
|
||||
MAX(json_extract(metadata, '$.updated_at')) as updated_at
|
||||
FROM checkpoints
|
||||
WHERE json_extract(metadata, '$.agent_name') = ?
|
||||
GROUP BY thread_id
|
||||
ORDER BY updated_at DESC
|
||||
LIMIT ?
|
||||
"""
|
||||
params: tuple = (agent_name, limit)
|
||||
else:
|
||||
query = """
|
||||
SELECT thread_id,
|
||||
json_extract(metadata, '$.agent_name') as agent_name,
|
||||
MAX(json_extract(metadata, '$.updated_at')) as updated_at
|
||||
FROM checkpoints
|
||||
GROUP BY thread_id
|
||||
ORDER BY updated_at DESC
|
||||
LIMIT ?
|
||||
"""
|
||||
params = (limit,)
|
||||
|
||||
async with conn.execute(query, params) as cursor:
|
||||
rows = await cursor.fetchall()
|
||||
return [{"thread_id": r[0], "agent_name": r[1], "updated_at": r[2]} for r in rows]
|
||||
|
||||
|
||||
async def get_most_recent(agent_name: str | None = None) -> str | None:
|
||||
"""Get most recent thread_id, optionally filtered by agent."""
|
||||
db_path = str(get_db_path())
|
||||
async with aiosqlite.connect(db_path, timeout=30.0) as conn:
|
||||
if not await _table_exists(conn, "checkpoints"):
|
||||
return None
|
||||
|
||||
if agent_name:
|
||||
query = """
|
||||
SELECT thread_id FROM checkpoints
|
||||
WHERE json_extract(metadata, '$.agent_name') = ?
|
||||
ORDER BY checkpoint_id DESC
|
||||
LIMIT 1
|
||||
"""
|
||||
params: tuple = (agent_name,)
|
||||
else:
|
||||
query = "SELECT thread_id FROM checkpoints ORDER BY checkpoint_id DESC LIMIT 1"
|
||||
params = ()
|
||||
|
||||
async with conn.execute(query, params) as cursor:
|
||||
row = await cursor.fetchone()
|
||||
return row[0] if row else None
|
||||
|
||||
|
||||
async def get_thread_agent(thread_id: str) -> str | None:
|
||||
"""Get agent_name for a thread."""
|
||||
db_path = str(get_db_path())
|
||||
async with aiosqlite.connect(db_path, timeout=30.0) as conn:
|
||||
if not await _table_exists(conn, "checkpoints"):
|
||||
return None
|
||||
|
||||
query = """
|
||||
SELECT json_extract(metadata, '$.agent_name')
|
||||
FROM checkpoints
|
||||
WHERE thread_id = ?
|
||||
LIMIT 1
|
||||
"""
|
||||
async with conn.execute(query, (thread_id,)) as cursor:
|
||||
row = await cursor.fetchone()
|
||||
return row[0] if row else None
|
||||
|
||||
|
||||
async def thread_exists(thread_id: str) -> bool:
|
||||
"""Check if a thread exists in checkpoints."""
|
||||
db_path = str(get_db_path())
|
||||
async with aiosqlite.connect(db_path, timeout=30.0) as conn:
|
||||
if not await _table_exists(conn, "checkpoints"):
|
||||
return False
|
||||
|
||||
query = "SELECT 1 FROM checkpoints WHERE thread_id = ? LIMIT 1"
|
||||
async with conn.execute(query, (thread_id,)) as cursor:
|
||||
row = await cursor.fetchone()
|
||||
return row is not None
|
||||
|
||||
|
||||
async def delete_thread(thread_id: str) -> bool:
|
||||
"""Delete thread checkpoints. Returns True if deleted."""
|
||||
db_path = str(get_db_path())
|
||||
async with aiosqlite.connect(db_path, timeout=30.0) as conn:
|
||||
if not await _table_exists(conn, "checkpoints"):
|
||||
return False
|
||||
|
||||
cursor = await conn.execute("DELETE FROM checkpoints WHERE thread_id = ?", (thread_id,))
|
||||
deleted = cursor.rowcount > 0
|
||||
if await _table_exists(conn, "writes"):
|
||||
await conn.execute("DELETE FROM writes WHERE thread_id = ?", (thread_id,))
|
||||
await conn.commit()
|
||||
return deleted
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def get_checkpointer() -> AsyncIterator[AsyncSqliteSaver]:
|
||||
"""Get AsyncSqliteSaver for the global database."""
|
||||
async with AsyncSqliteSaver.from_conn_string(str(get_db_path())) as checkpointer:
|
||||
yield checkpointer
|
||||
|
||||
|
||||
async def list_threads_command(
|
||||
agent_name: str | None = None,
|
||||
limit: int = 20,
|
||||
) -> None:
|
||||
"""CLI handler for: deepagents threads list."""
|
||||
threads = await list_threads(agent_name, limit=limit)
|
||||
|
||||
if not threads:
|
||||
if agent_name:
|
||||
console.print(f"[yellow]No threads found for agent '{agent_name}'.[/yellow]")
|
||||
else:
|
||||
console.print("[yellow]No threads found.[/yellow]")
|
||||
console.print("[dim]Start a conversation with: deepagents[/dim]")
|
||||
return
|
||||
|
||||
title = f"Threads for '{agent_name}'" if agent_name else "All Threads"
|
||||
|
||||
table = Table(title=title, show_header=True, header_style=f"bold {COLORS['primary']}")
|
||||
table.add_column("Thread ID", style="bold")
|
||||
table.add_column("Agent")
|
||||
table.add_column("Last Used", style="dim")
|
||||
|
||||
for t in threads:
|
||||
table.add_row(
|
||||
t["thread_id"],
|
||||
t["agent_name"] or "unknown",
|
||||
_format_timestamp(t.get("updated_at")),
|
||||
)
|
||||
|
||||
console.print()
|
||||
console.print(table)
|
||||
console.print()
|
||||
|
||||
|
||||
async def delete_thread_command(thread_id: str) -> None:
|
||||
"""CLI handler for: deepagents threads delete."""
|
||||
deleted = await delete_thread(thread_id)
|
||||
|
||||
if deleted:
|
||||
console.print(f"[green]Thread '{thread_id}' deleted.[/green]")
|
||||
else:
|
||||
console.print(f"[red]Thread '{thread_id}' not found.[/red]")
|
||||
@@ -1,4 +1,4 @@
|
||||
"""에이전트에 기본 셸 도구를 노출하는 단순화된 미들웨어."""
|
||||
"""Simplified middleware that exposes a basic shell tool to agents."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
@@ -13,10 +13,10 @@ from langchain_core.tools.base import ToolException
|
||||
|
||||
|
||||
class ShellMiddleware(AgentMiddleware[AgentState, Any]):
|
||||
"""shell을 통해 에이전트에게 기본 셸 액세스 권한을 부여합니다.
|
||||
"""Give basic shell access to agents via the shell.
|
||||
|
||||
이 셸은 로컬 머신에서 실행되며 CLI 자체에서 제공하는 human-in-the-loop 안전장치 외에는
|
||||
어떠한 안전장치도 없습니다.
|
||||
This shell will execute on the local machine and has NO safeguards except
|
||||
for the human in the loop safeguard provided by the CLI itself.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
@@ -27,16 +27,16 @@ class ShellMiddleware(AgentMiddleware[AgentState, Any]):
|
||||
max_output_bytes: int = 100_000,
|
||||
env: dict[str, str] | None = None,
|
||||
) -> None:
|
||||
"""`ShellMiddleware`의 인스턴스를 초기화합니다.
|
||||
"""Initialize an instance of `ShellMiddleware`.
|
||||
|
||||
Args:
|
||||
workspace_root: 셸 명령을 위한 작업 디렉터리.
|
||||
timeout: 명령 완료를 기다리는 최대 시간(초).
|
||||
기본값은 120초입니다.
|
||||
max_output_bytes: 명령 출력에서 캡처할 최대 바이트 수.
|
||||
기본값은 100,000바이트입니다.
|
||||
env: 하위 프로세스에 전달할 환경 변수. None이면
|
||||
현재 프로세스의 환경을 사용합니다. 기본값은 None입니다.
|
||||
workspace_root: Working directory for shell commands.
|
||||
timeout: Maximum time in seconds to wait for command completion.
|
||||
Defaults to 120 seconds.
|
||||
max_output_bytes: Maximum number of bytes to capture from command output.
|
||||
Defaults to 100,000 bytes.
|
||||
env: Environment variables to pass to the subprocess. If None,
|
||||
uses the current process's environment. Defaults to None.
|
||||
"""
|
||||
super().__init__()
|
||||
self._timeout = timeout
|
||||
@@ -45,12 +45,12 @@ class ShellMiddleware(AgentMiddleware[AgentState, Any]):
|
||||
self._env = env if env is not None else os.environ.copy()
|
||||
self._workspace_root = workspace_root
|
||||
|
||||
# Build description with workspace info
|
||||
# Build description with working directory information
|
||||
description = (
|
||||
f"Execute shell commands directly on the host. Commands run in this working directory: "
|
||||
f"{workspace_root}. Each command runs in a fresh shell environment with the "
|
||||
f"current process's environment variables. Commands may be truncated if they exceed "
|
||||
f"configured timeout or output limits."
|
||||
f"Execute a shell command directly on the host. Commands will run in "
|
||||
f"the working directory: {workspace_root}. Each command runs in a fresh shell "
|
||||
f"environment with the current process's environment variables. Commands may "
|
||||
f"be truncated if they exceed the configured timeout or output limits."
|
||||
)
|
||||
|
||||
@tool(self._tool_name, description=description)
|
||||
@@ -75,17 +75,17 @@ class ShellMiddleware(AgentMiddleware[AgentState, Any]):
|
||||
*,
|
||||
tool_call_id: str | None,
|
||||
) -> ToolMessage | str:
|
||||
"""셸 명령을 실행하고 결과를 반환합니다.
|
||||
"""Execute a shell command and return the result.
|
||||
|
||||
Args:
|
||||
command: 실행할 셸 명령.
|
||||
tool_call_id: ToolMessage 생성을 위한 도구 호출 ID.
|
||||
command: The shell command to execute.
|
||||
tool_call_id: The tool call ID for creating a ToolMessage.
|
||||
|
||||
Returns:
|
||||
명령 출력 또는 오류 메시지가 포함된 ToolMessage.
|
||||
A ToolMessage with the command output or an error message.
|
||||
"""
|
||||
if not command or not isinstance(command, str):
|
||||
msg = "Shell 도구는 비어 있지 않은 명령 문자열을 필요로 합니다."
|
||||
msg = "Shell tool expects a non-empty command string."
|
||||
raise ToolException(msg)
|
||||
|
||||
try:
|
||||
@@ -111,20 +111,20 @@ class ShellMiddleware(AgentMiddleware[AgentState, Any]):
|
||||
|
||||
output = "\n".join(output_parts) if output_parts else "<no output>"
|
||||
|
||||
# 필요한 경우 출력 자르기
|
||||
# Truncate output if needed
|
||||
if len(output) > self._max_output_bytes:
|
||||
output = output[: self._max_output_bytes]
|
||||
output += f"\n\n... 출력이 {self._max_output_bytes}바이트에서 잘렸습니다."
|
||||
output += f"\n\n... Output truncated at {self._max_output_bytes} bytes."
|
||||
|
||||
# 0이 아닌 경우 종료 코드 정보 추가
|
||||
# Add exit code info if non-zero
|
||||
if result.returncode != 0:
|
||||
output = f"{output.rstrip()}\n\n종료 코드: {result.returncode}"
|
||||
output = f"{output.rstrip()}\n\nExit code: {result.returncode}"
|
||||
status = "error"
|
||||
else:
|
||||
status = "success"
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
output = f"오류: 명령이 {self._timeout:.1f}초 후에 시간 초과되었습니다."
|
||||
output = f"Error: Command timed out after {self._timeout:.1f} seconds."
|
||||
status = "error"
|
||||
|
||||
return ToolMessage(
|
||||
|
||||
@@ -1,21 +1,18 @@
|
||||
"""deepagents CLI를 위한 Skills 모듈.
|
||||
"""Skills module for deepagents CLI.
|
||||
|
||||
공개 API:
|
||||
- SkillsMiddleware: 기술을 에이전트 실행에 통합하기 위한 미들웨어
|
||||
- execute_skills_command: 기술 하위 명령(list/create/info) 실행
|
||||
- setup_skills_parser: 기술 명령을 위한 argparse 설정
|
||||
Public API:
|
||||
- execute_skills_command: Execute skills subcommands (list/create/info)
|
||||
- setup_skills_parser: Setup argparse configuration for skills commands
|
||||
|
||||
기타 모든 구성 요소는 내부 구현 세부 사항입니다.
|
||||
All other components are internal implementation details.
|
||||
"""
|
||||
|
||||
from deepagents_cli.skills.commands import (
|
||||
execute_skills_command,
|
||||
setup_skills_parser,
|
||||
)
|
||||
from deepagents_cli.skills.middleware import SkillsMiddleware
|
||||
|
||||
__all__ = [
|
||||
"SkillsMiddleware",
|
||||
"execute_skills_command",
|
||||
"setup_skills_parser",
|
||||
]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
"""기술 관리를 위한 CLI 명령.
|
||||
"""CLI commands for skill management.
|
||||
|
||||
이 명령들은 cli.py를 통해 CLI에 등록됩니다:
|
||||
These commands are registered with the CLI via cli.py:
|
||||
- deepagents skills list --agent <agent> [--project]
|
||||
- deepagents skills create <name>
|
||||
- deepagents skills info <name>
|
||||
@@ -12,188 +12,191 @@ from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from deepagents_cli.config import COLORS, Settings, console
|
||||
from deepagents_cli.skills.load import MAX_SKILL_NAME_LENGTH, list_skills
|
||||
from deepagents_cli.skills.load import list_skills
|
||||
|
||||
MAX_SKILL_NAME_LENGTH = 64
|
||||
|
||||
|
||||
def _validate_name(name: str) -> tuple[bool, str]:
|
||||
"""Agent Skills 사양에 따라 이름을 검증합니다.
|
||||
"""Validate name per Agent Skills spec.
|
||||
|
||||
요구 사항 (https://agentskills.io/specification):
|
||||
- 최대 64자
|
||||
- 소문자 영숫자와 하이픈만 허용 (a-z, 0-9, -)
|
||||
- 하이픈으로 시작하거나 끝날 수 없음
|
||||
- 연속된 하이픈 허용 안 함
|
||||
- 경로 탐색 시퀀스 허용 안 함
|
||||
Requirements (https://agentskills.io/specification):
|
||||
- Max 64 characters
|
||||
- Lowercase alphanumeric and hyphens only (a-z, 0-9, -)
|
||||
- Cannot start or end with hyphen
|
||||
- No consecutive hyphens
|
||||
- No path traversal sequences
|
||||
|
||||
Args:
|
||||
name: 검증할 이름
|
||||
name: The name to validate
|
||||
|
||||
Returns:
|
||||
(유효 여부, 오류 메시지) 튜플. 유효한 경우 오류 메시지는 비어 있습니다.
|
||||
Tuple of (is_valid, error_message). If valid, error_message is empty.
|
||||
"""
|
||||
# 비어 있거나 공백만 있는 이름 확인
|
||||
# Check for empty or whitespace-only names
|
||||
if not name or not name.strip():
|
||||
return False, "비어 있을 수 없습니다"
|
||||
return False, "cannot be empty"
|
||||
|
||||
# 길이 확인 (사양: 최대 64자)
|
||||
# Check length (spec: max 64 chars)
|
||||
if len(name) > MAX_SKILL_NAME_LENGTH:
|
||||
return False, "64자를 초과할 수 없습니다"
|
||||
return False, "cannot exceed 64 characters"
|
||||
|
||||
# 경로 탐색 시퀀스 확인
|
||||
# Check for path traversal sequences
|
||||
if ".." in name or "/" in name or "\\" in name:
|
||||
return False, "경로 요소를 포함할 수 없습니다"
|
||||
return False, "cannot contain path components"
|
||||
|
||||
# 사양: 소문자 영숫자와 하이픈만 허용
|
||||
# 패턴 보장: 시작/종료 하이픈 없음, 연속 하이픈 없음
|
||||
# Spec: lowercase alphanumeric and hyphens only
|
||||
# Pattern ensures: no start/end hyphen, no consecutive hyphens
|
||||
if not re.match(r"^[a-z0-9]+(-[a-z0-9]+)*$", name):
|
||||
return (
|
||||
False,
|
||||
"소문자, 숫자, 하이픈만 사용해야 합니다 (대문자, 밑줄 불가능, 하이픈으로 시작하거나 끝날 수 없음)",
|
||||
"must be lowercase letters, numbers, and hyphens only "
|
||||
"(no uppercase, no underscores, cannot start/end with hyphen)",
|
||||
)
|
||||
|
||||
return True, ""
|
||||
|
||||
|
||||
def _validate_skill_path(skill_dir: Path, base_dir: Path) -> tuple[bool, str]:
|
||||
"""해결된 기술 디렉토리가 기본 디렉토리 내에 있는지 확인합니다.
|
||||
"""Validate that the resolved skill directory is within the base directory.
|
||||
|
||||
Args:
|
||||
skill_dir: 검증할 기술 디렉토리 경로
|
||||
base_dir: skill_dir을 포함해야 하는 기본 기술 디렉토리
|
||||
skill_dir: The skill directory path to validate
|
||||
base_dir: The base skills directory that should contain skill_dir
|
||||
|
||||
Returns:
|
||||
(유효 여부, 오류 메시지) 튜플. 유효한 경우 오류 메시지는 비어 있습니다.
|
||||
Tuple of (is_valid, error_message). If valid, error_message is empty.
|
||||
"""
|
||||
try:
|
||||
# 두 경로를 정식 형식으로 해결
|
||||
# Resolve both paths to their canonical form
|
||||
resolved_skill = skill_dir.resolve()
|
||||
resolved_base = base_dir.resolve()
|
||||
|
||||
# skill_dir이 base_dir 내에 있는지 확인
|
||||
# Python 3.9+인 경우 is_relative_to 사용, 그렇지 않으면 문자열 비교 사용
|
||||
# Check if skill_dir is within base_dir
|
||||
# Use is_relative_to if available (Python 3.9+), otherwise use string comparison
|
||||
if hasattr(resolved_skill, "is_relative_to"):
|
||||
if not resolved_skill.is_relative_to(resolved_base):
|
||||
return False, f"기술 디렉토리는 {base_dir} 내에 있어야 합니다"
|
||||
return False, f"Skill directory must be within {base_dir}"
|
||||
else:
|
||||
# 이전 Python 버전을 위한 폴백
|
||||
# Fallback for older Python versions
|
||||
try:
|
||||
resolved_skill.relative_to(resolved_base)
|
||||
except ValueError:
|
||||
return False, f"기술 디렉토리는 {base_dir} 내에 있어야 합니다"
|
||||
return False, f"Skill directory must be within {base_dir}"
|
||||
|
||||
return True, ""
|
||||
except (OSError, RuntimeError) as e:
|
||||
return False, f"잘못된 경로: {e}"
|
||||
return False, f"Invalid path: {e}"
|
||||
|
||||
|
||||
def _list(agent: str, *, project: bool = False) -> None:
|
||||
"""지정된 에이전트에 대해 사용 가능한 모든 기술을 나열합니다.
|
||||
"""List all available skills for the specified agent.
|
||||
|
||||
Args:
|
||||
agent: 기술을 위한 에이전트 식별자 (기본값: agent).
|
||||
project: True인 경우 프로젝트 기술만 표시합니다.
|
||||
False인 경우 모든 기술(사용자 + 프로젝트)을 표시합니다.
|
||||
agent: Agent identifier for skills (default: agent).
|
||||
project: If True, show only project skills.
|
||||
If False, show all skills (user + project).
|
||||
"""
|
||||
settings = Settings.from_environment()
|
||||
user_skills_dir = settings.get_user_skills_dir(agent)
|
||||
project_skills_dir = settings.get_project_skills_dir()
|
||||
|
||||
# --project 플래그가 사용된 경우 프로젝트 기술만 표시
|
||||
# If --project flag is used, only show project skills
|
||||
if project:
|
||||
if not project_skills_dir:
|
||||
console.print("[yellow]프로젝트 디렉토리가 아닙니다.[/yellow]")
|
||||
console.print("[yellow]Not in a project directory.[/yellow]")
|
||||
console.print(
|
||||
"[dim]프로젝트 기술을 사용하려면 프로젝트 루트에 .git 디렉토리가 필요합니다.[/dim]",
|
||||
"[dim]Project skills require a .git directory in the project root.[/dim]",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
return
|
||||
|
||||
if not project_skills_dir.exists() or not any(project_skills_dir.iterdir()):
|
||||
console.print("[yellow]프로젝트 기술을 찾을 수 없습니다.[/yellow]")
|
||||
console.print("[yellow]No project skills found.[/yellow]")
|
||||
console.print(
|
||||
f"[dim]프로젝트 기술을 추가하면 {project_skills_dir}/ 에 생성됩니다.[/dim]",
|
||||
f"[dim]Project skills will be created in {project_skills_dir}/ when you add them.[/dim]",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
console.print(
|
||||
"\n[dim]프로젝트 기술 생성:\n deepagents skills create my-skill --project[/dim]",
|
||||
"\n[dim]Create a project skill:\n deepagents skills create my-skill --project[/dim]",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
return
|
||||
|
||||
skills = list_skills(user_skills_dir=None, project_skills_dir=project_skills_dir)
|
||||
console.print("\n[bold]프로젝트 기술:[/bold]\n", style=COLORS["primary"])
|
||||
console.print("\n[bold]Project Skills:[/bold]\n", style=COLORS["primary"])
|
||||
else:
|
||||
# 사용자 및 프로젝트 기술 모두 로드
|
||||
# Load both user and project skills
|
||||
skills = list_skills(user_skills_dir=user_skills_dir, project_skills_dir=project_skills_dir)
|
||||
|
||||
if not skills:
|
||||
console.print("[yellow]기술을 찾을 수 없습니다.[/yellow]")
|
||||
console.print("[yellow]No skills found.[/yellow]")
|
||||
console.print(
|
||||
"[dim]기술을 추가하면 ~/.deepagents/agent/skills/ 에 생성됩니다.[/dim]",
|
||||
"[dim]Skills will be created in ~/.deepagents/agent/skills/ when you add them.[/dim]",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
console.print(
|
||||
"\n[dim]첫 번째 기술 생성:\n deepagents skills create my-skill[/dim]",
|
||||
"\n[dim]Create your first skill:\n deepagents skills create my-skill[/dim]",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
return
|
||||
|
||||
console.print("\n[bold]사용 가능한 기술:[/bold]\n", style=COLORS["primary"])
|
||||
console.print("\n[bold]Available Skills:[/bold]\n", style=COLORS["primary"])
|
||||
|
||||
# 출처별로 기술 그룹화
|
||||
# Group skills by source
|
||||
user_skills = [s for s in skills if s["source"] == "user"]
|
||||
project_skills_list = [s for s in skills if s["source"] == "project"]
|
||||
|
||||
# 사용자 기술 표시
|
||||
# Show user skills
|
||||
if user_skills and not project:
|
||||
console.print("[bold cyan]사용자 기술:[/bold cyan]", style=COLORS["primary"])
|
||||
console.print("[bold cyan]User Skills:[/bold cyan]", style=COLORS["primary"])
|
||||
for skill in user_skills:
|
||||
skill_path = Path(skill["path"])
|
||||
console.print(f" • [bold]{skill['name']}[/bold]", style=COLORS["primary"])
|
||||
console.print(f" {skill['description']}", style=COLORS["dim"])
|
||||
console.print(f" 위치: {skill_path.parent}/", style=COLORS["dim"])
|
||||
console.print(f" Location: {skill_path.parent}/", style=COLORS["dim"])
|
||||
console.print()
|
||||
|
||||
# 프로젝트 기술 표시
|
||||
# Show project skills
|
||||
if project_skills_list:
|
||||
if not project and user_skills:
|
||||
console.print()
|
||||
console.print("[bold green]프로젝트 기술:[/bold green]", style=COLORS["primary"])
|
||||
console.print("[bold green]Project Skills:[/bold green]", style=COLORS["primary"])
|
||||
for skill in project_skills_list:
|
||||
skill_path = Path(skill["path"])
|
||||
console.print(f" • [bold]{skill['name']}[/bold]", style=COLORS["primary"])
|
||||
console.print(f" {skill['description']}", style=COLORS["dim"])
|
||||
console.print(f" 위치: {skill_path.parent}/", style=COLORS["dim"])
|
||||
console.print(f" Location: {skill_path.parent}/", style=COLORS["dim"])
|
||||
console.print()
|
||||
|
||||
|
||||
def _create(skill_name: str, agent: str, project: bool = False) -> None:
|
||||
"""템플릿 SKILL.md 파일을 사용하여 새 기술을 생성합니다.
|
||||
"""Create a new skill with a template SKILL.md file.
|
||||
|
||||
Args:
|
||||
skill_name: 생성할 기술의 이름.
|
||||
agent: 기술을 위한 에이전트 식별자
|
||||
project: True인 경우 프로젝트 기술 디렉토리에 생성합니다.
|
||||
False인 경우 사용자 기술 디렉토리에 생성합니다.
|
||||
skill_name: Name of the skill to create.
|
||||
agent: Agent identifier for skills
|
||||
project: If True, create in project skills directory.
|
||||
If False, create in user skills directory.
|
||||
"""
|
||||
# 기술 이름 먼저 검증 (Agent Skills 사양에 따름)
|
||||
# Validate skill name first (per Agent Skills spec)
|
||||
is_valid, error_msg = _validate_name(skill_name)
|
||||
if not is_valid:
|
||||
console.print(f"[bold red]오류:[/bold red] 잘못된 기술 이름: {error_msg}")
|
||||
console.print(f"[bold red]Error:[/bold red] Invalid skill name: {error_msg}")
|
||||
console.print(
|
||||
"[dim]Agent Skills 사양에 따라: 이름은 소문자 영숫자와 하이픈만 사용해야 합니다.\n"
|
||||
"예시: web-research, code-review, data-analysis[/dim]",
|
||||
"[dim]Per Agent Skills spec: names must be lowercase alphanumeric with hyphens only.\n"
|
||||
"Examples: web-research, code-review, data-analysis[/dim]",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
return
|
||||
|
||||
# 대상 디렉토리 결정
|
||||
# Determine target directory
|
||||
settings = Settings.from_environment()
|
||||
if project:
|
||||
if not settings.project_root:
|
||||
console.print("[bold red]오류:[/bold red] 프로젝트 디렉토리가 아닙니다.")
|
||||
console.print("[bold red]Error:[/bold red] Not in a project directory.")
|
||||
console.print(
|
||||
"[dim]프로젝트 기술을 사용하려면 프로젝트 루트에 .git 디렉토리가 필요합니다.[/dim]",
|
||||
"[dim]Project skills require a .git directory in the project root.[/dim]",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
return
|
||||
@@ -203,24 +206,26 @@ def _create(skill_name: str, agent: str, project: bool = False) -> None:
|
||||
|
||||
skill_dir = skills_dir / skill_name
|
||||
|
||||
# 해결된 경로가 skills_dir 내에 있는지 확인
|
||||
# Validate the resolved path is within skills_dir
|
||||
is_valid_path, path_error = _validate_skill_path(skill_dir, skills_dir)
|
||||
if not is_valid_path:
|
||||
console.print(f"[bold red]오류:[/bold red] {path_error}")
|
||||
console.print(f"[bold red]Error:[/bold red] {path_error}")
|
||||
return
|
||||
|
||||
if skill_dir.exists():
|
||||
console.print(f"[bold red]오류:[/bold red] '{skill_name}' 기술이 이미 {skill_dir} 에 존재합니다")
|
||||
console.print(
|
||||
f"[bold red]Error:[/bold red] Skill '{skill_name}' already exists at {skill_dir}"
|
||||
)
|
||||
return
|
||||
|
||||
# 기술 디렉토리 생성
|
||||
# Create skill directory
|
||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# 템플릿 SKILL.md 생성 (사양: https://agentskills.io/specification)
|
||||
# Create template SKILL.md (per Agent Skills spec: https://agentskills.io/specification)
|
||||
template = f"""---
|
||||
name: {skill_name}
|
||||
description: 이 기술이 수행하는 작업과 사용 시기에 대한 간략한 설명.
|
||||
# Agent Skills 사양에 따른 선택적 필드:
|
||||
description: Brief description of what this skill does and when to use it.
|
||||
# Optional fields per Agent Skills spec:
|
||||
# license: Apache-2.0
|
||||
# compatibility: Designed for deepagents CLI
|
||||
# metadata:
|
||||
@@ -229,149 +234,149 @@ description: 이 기술이 수행하는 작업과 사용 시기에 대한 간략
|
||||
# allowed-tools: Bash(git:*) Read
|
||||
---
|
||||
|
||||
# {skill_name.title().replace("-", " ")} 기술
|
||||
# {skill_name.title().replace("-", " ")} Skill
|
||||
|
||||
## 설명
|
||||
## Description
|
||||
|
||||
[이 기술이 수행하는 작업과 사용해야 하는 시기에 대한 자세한 설명을 제공하십시오]
|
||||
[Provide a detailed explanation of what this skill does and when it should be used]
|
||||
|
||||
## 사용 시기
|
||||
## When to Use
|
||||
|
||||
- [시나리오 1: 사용자가 ...를 요청할 때]
|
||||
- [시나리오 2: ...가 필요할 때]
|
||||
- [시나리오 3: 태스크에 ...가 포함될 때]
|
||||
- [Scenario 1: When the user asks...]
|
||||
- [Scenario 2: When you need to...]
|
||||
- [Scenario 3: When the task involves...]
|
||||
|
||||
## 사용 방법
|
||||
## How to Use
|
||||
|
||||
### 1단계: [첫 번째 작업]
|
||||
[먼저 수행할 작업을 설명하십시오]
|
||||
### Step 1: [First Action]
|
||||
[Explain what to do first]
|
||||
|
||||
### 2단계: [두 번째 작업]
|
||||
[다음에 수행할 작업을 설명하십시오]
|
||||
### Step 2: [Second Action]
|
||||
[Explain what to do next]
|
||||
|
||||
### 3단계: [최종 작업]
|
||||
[태스크를 완료하는 방법을 설명하십시오]
|
||||
### Step 3: [Final Action]
|
||||
[Explain how to complete the task]
|
||||
|
||||
## 권장 사항
|
||||
## Best Practices
|
||||
|
||||
- [권장 사항 1]
|
||||
- [권장 사항 2]
|
||||
- [권장 사항 3]
|
||||
- [Best practice 1]
|
||||
- [Best practice 2]
|
||||
- [Best practice 3]
|
||||
|
||||
## 지원 파일
|
||||
## Supporting Files
|
||||
|
||||
이 기술 디렉토리에는 지침에서 참조하는 지원 파일이 포함될 수 있습니다:
|
||||
- `helper.py` - 자동화를 위한 Python 스크립트
|
||||
- `config.json` - 설정 파일
|
||||
- `reference.md` - 추가 참조 문서
|
||||
This skill directory can include supporting files referenced in the instructions:
|
||||
- `helper.py` - Python scripts for automation
|
||||
- `config.json` - Configuration files
|
||||
- `reference.md` - Additional reference documentation
|
||||
|
||||
## 예시
|
||||
## Examples
|
||||
|
||||
### 예시 1: [시나리오 이름]
|
||||
### Example 1: [Scenario Name]
|
||||
|
||||
**사용자 요청:** "[사용자 요청 예시]"
|
||||
**User Request:** "[Example user request]"
|
||||
|
||||
**접근 방식:**
|
||||
1. [단계별 분석]
|
||||
2. [도구 및 명령 사용]
|
||||
3. [예상 결과]
|
||||
**Approach:**
|
||||
1. [Step-by-step breakdown]
|
||||
2. [Using tools and commands]
|
||||
3. [Expected outcome]
|
||||
|
||||
### 예시 2: [다른 시나리오]
|
||||
### Example 2: [Another Scenario]
|
||||
|
||||
**사용자 요청:** "[다른 예시]"
|
||||
**User Request:** "[Another example]"
|
||||
|
||||
**접근 방식:**
|
||||
1. [다른 접근 방식]
|
||||
2. [관련 명령]
|
||||
3. [예상 결과]
|
||||
**Approach:**
|
||||
1. [Different approach]
|
||||
2. [Relevant commands]
|
||||
3. [Expected result]
|
||||
|
||||
## 참고 사항
|
||||
## Notes
|
||||
|
||||
- [추가 팁, 경고 또는 컨텍스트]
|
||||
- [알려진 제한 사항 또는 예외 케이스]
|
||||
- [도움이 되는 외부 리소스 링크]
|
||||
- [Additional tips, warnings, or context]
|
||||
- [Known limitations or edge cases]
|
||||
- [Links to external resources if helpful]
|
||||
"""
|
||||
|
||||
skill_md = skill_dir / "SKILL.md"
|
||||
skill_md.write_text(template)
|
||||
|
||||
console.print(f"✓ '{skill_name}' 기술이 성공적으로 생성되었습니다!", style=COLORS["primary"])
|
||||
console.print(f"위치: {skill_dir}\n", style=COLORS["dim"])
|
||||
console.print(f"✓ Skill '{skill_name}' created successfully!", style=COLORS["primary"])
|
||||
console.print(f"Location: {skill_dir}\n", style=COLORS["dim"])
|
||||
console.print(
|
||||
"[dim]SKILL.md 파일을 편집하여 사용자 정의하십시오:\n"
|
||||
" 1. YAML frontmatter에서 설명을 업데이트하십시오\n"
|
||||
" 2. 지침과 예시를 채우십시오\n"
|
||||
" 3. 지원 파일(스크립트, 설정 등)을 추가하십시오\n"
|
||||
"[dim]Edit the SKILL.md file to customize:\n"
|
||||
" 1. Update the description in YAML frontmatter\n"
|
||||
" 2. Fill in the instructions and examples\n"
|
||||
" 3. Add any supporting files (scripts, configs, etc.)\n"
|
||||
"\n"
|
||||
f" nano {skill_md}\n"
|
||||
"\n"
|
||||
"💡 기술 예시는 deepagents 저장소의 examples/skills/ 를 참조하십시오:\n"
|
||||
" - web-research: 구조화된 연구 워크플로우\n"
|
||||
" - langgraph-docs: LangGraph 문서 조회\n"
|
||||
"💡 See examples/skills/ in the deepagents repo for example skills:\n"
|
||||
" - web-research: Structured research workflow\n"
|
||||
" - langgraph-docs: LangGraph documentation lookup\n"
|
||||
"\n"
|
||||
" 예시 복사: cp -r examples/skills/web-research ~/.deepagents/agent/skills/\n",
|
||||
" Copy an example: cp -r examples/skills/web-research ~/.deepagents/agent/skills/\n",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
|
||||
|
||||
def _info(skill_name: str, *, agent: str = "agent", project: bool = False) -> None:
|
||||
"""특정 기술에 대한 자세한 정보를 표시합니다.
|
||||
"""Show detailed information about a specific skill.
|
||||
|
||||
Args:
|
||||
skill_name: 세부 정보를 표시할 기술의 이름.
|
||||
agent: 기술을 위한 에이전트 식별자 (기본값: agent).
|
||||
project: True인 경우 프로젝트 기술만 검색합니다. False인 경우 사용자 및 프로젝트 기술 모두에서 검색합니다.
|
||||
skill_name: Name of the skill to show info for.
|
||||
agent: Agent identifier for skills (default: agent).
|
||||
project: If True, only search in project skills. If False, search in both user and project skills.
|
||||
"""
|
||||
settings = Settings.from_environment()
|
||||
user_skills_dir = settings.get_user_skills_dir(agent)
|
||||
project_skills_dir = settings.get_project_skills_dir()
|
||||
|
||||
# --project 플래그에 따라 기술 로드
|
||||
# Load skills based on --project flag
|
||||
if project:
|
||||
if not project_skills_dir:
|
||||
console.print("[bold red]오류:[/bold red] 프로젝트 디렉토리가 아닙니다.")
|
||||
console.print("[bold red]Error:[/bold red] Not in a project directory.")
|
||||
return
|
||||
skills = list_skills(user_skills_dir=None, project_skills_dir=project_skills_dir)
|
||||
else:
|
||||
skills = list_skills(user_skills_dir=user_skills_dir, project_skills_dir=project_skills_dir)
|
||||
|
||||
# 기술 찾기
|
||||
# Find the skill
|
||||
skill = next((s for s in skills if s["name"] == skill_name), None)
|
||||
|
||||
if not skill:
|
||||
console.print(f"[bold red]오류:[/bold red] '{skill_name}' 기술을 찾을 수 없습니다.")
|
||||
console.print("\n[dim]사용 가능한 기술:[/dim]", style=COLORS["dim"])
|
||||
console.print(f"[bold red]Error:[/bold red] Skill '{skill_name}' not found.")
|
||||
console.print("\n[dim]Available skills:[/dim]", style=COLORS["dim"])
|
||||
for s in skills:
|
||||
console.print(f" - {s['name']}", style=COLORS["dim"])
|
||||
return
|
||||
|
||||
# 전체 SKILL.md 파일 읽기
|
||||
# Read the full SKILL.md file
|
||||
skill_path = Path(skill["path"])
|
||||
skill_content = skill_path.read_text()
|
||||
|
||||
# 출처 레이블 결정
|
||||
source_label = "프로젝트 기술" if skill["source"] == "project" else "사용자 기술"
|
||||
# Determine source label
|
||||
source_label = "Project Skill" if skill["source"] == "project" else "User Skill"
|
||||
source_color = "green" if skill["source"] == "project" else "cyan"
|
||||
|
||||
console.print(
|
||||
f"\n[bold]기술: {skill['name']}[/bold] [bold {source_color}]({source_label})[/bold {source_color}]\n",
|
||||
f"\n[bold]Skill: {skill['name']}[/bold] [bold {source_color}]({source_label})[/bold {source_color}]\n",
|
||||
style=COLORS["primary"],
|
||||
)
|
||||
console.print(f"[bold]설명:[/bold] {skill['description']}\n", style=COLORS["dim"])
|
||||
console.print(f"[bold]위치:[/bold] {skill_path.parent}/\n", style=COLORS["dim"])
|
||||
console.print(f"[bold]Description:[/bold] {skill['description']}\n", style=COLORS["dim"])
|
||||
console.print(f"[bold]Location:[/bold] {skill_path.parent}/\n", style=COLORS["dim"])
|
||||
|
||||
# 지원 파일 나열
|
||||
# List supporting files
|
||||
skill_dir = skill_path.parent
|
||||
supporting_files = [f for f in skill_dir.iterdir() if f.name != "SKILL.md"]
|
||||
|
||||
if supporting_files:
|
||||
console.print("[bold]지원 파일:[/bold]", style=COLORS["dim"])
|
||||
console.print("[bold]Supporting Files:[/bold]", style=COLORS["dim"])
|
||||
for file in supporting_files:
|
||||
console.print(f" - {file.name}", style=COLORS["dim"])
|
||||
console.print()
|
||||
|
||||
# 전체 SKILL.md 내용 표시
|
||||
console.print("[bold]전체 SKILL.md 내용:[/bold]\n", style=COLORS["primary"])
|
||||
# Show the full SKILL.md content
|
||||
console.print("[bold]Full SKILL.md Content:[/bold]\n", style=COLORS["primary"])
|
||||
console.print(skill_content, style=COLORS["dim"])
|
||||
console.print()
|
||||
|
||||
@@ -379,80 +384,80 @@ def _info(skill_name: str, *, agent: str = "agent", project: bool = False) -> No
|
||||
def setup_skills_parser(
|
||||
subparsers: Any,
|
||||
) -> argparse.ArgumentParser:
|
||||
"""모든 하위 명령과 함께 기술 하위 명령 파서를 설정합니다."""
|
||||
"""Setup the skills subcommand parser with all its subcommands."""
|
||||
skills_parser = subparsers.add_parser(
|
||||
"skills",
|
||||
help="에이전트 기술 관리",
|
||||
description="에이전트 기술 관리 - 기술 정보 생성, 나열 및 보기",
|
||||
help="Manage agent skills",
|
||||
description="Manage agent skills - create, list, and view skill information",
|
||||
)
|
||||
skills_subparsers = skills_parser.add_subparsers(dest="skills_command", help="기술 명령")
|
||||
skills_subparsers = skills_parser.add_subparsers(dest="skills_command", help="Skills command")
|
||||
|
||||
# 기술 목록
|
||||
# Skills list
|
||||
list_parser = skills_subparsers.add_parser(
|
||||
"list", help="사용 가능한 모든 기술 나열", description="사용 가능한 모든 기술 나열"
|
||||
"list", help="List all available skills", description="List all available skills"
|
||||
)
|
||||
list_parser.add_argument(
|
||||
"--agent",
|
||||
default="agent",
|
||||
help="기술을 위한 에이전트 식별자 (기본값: agent)",
|
||||
help="Agent identifier for skills (default: agent)",
|
||||
)
|
||||
list_parser.add_argument(
|
||||
"--project",
|
||||
action="store_true",
|
||||
help="프로젝트 수준 기술만 표시",
|
||||
help="Show only project-level skills",
|
||||
)
|
||||
|
||||
# 기술 생성
|
||||
# Skills create
|
||||
create_parser = skills_subparsers.add_parser(
|
||||
"create",
|
||||
help="새 기술 생성",
|
||||
description="템플릿 SKILL.md 파일을 사용하여 새 기술 생성",
|
||||
help="Create a new skill",
|
||||
description="Create a new skill with a template SKILL.md file",
|
||||
)
|
||||
create_parser.add_argument("name", help="생성할 기술 이름 (예: web-research)")
|
||||
create_parser.add_argument("name", help="Name of the skill to create (e.g., web-research)")
|
||||
create_parser.add_argument(
|
||||
"--agent",
|
||||
default="agent",
|
||||
help="기술을 위한 에이전트 식별자 (기본값: agent)",
|
||||
help="Agent identifier for skills (default: agent)",
|
||||
)
|
||||
create_parser.add_argument(
|
||||
"--project",
|
||||
action="store_true",
|
||||
help="사용자 디렉토리 대신 프로젝트 디렉토리에 기술 생성",
|
||||
help="Create skill in project directory instead of user directory",
|
||||
)
|
||||
|
||||
# 기술 정보
|
||||
# Skills info
|
||||
info_parser = skills_subparsers.add_parser(
|
||||
"info",
|
||||
help="기술에 대한 자세한 정보 표시",
|
||||
description="특정 기술에 대한 자세한 정보 표시",
|
||||
help="Show detailed information about a skill",
|
||||
description="Show detailed information about a specific skill",
|
||||
)
|
||||
info_parser.add_argument("name", help="정보를 표시할 기술 이름")
|
||||
info_parser.add_argument("name", help="Name of the skill to show info for")
|
||||
info_parser.add_argument(
|
||||
"--agent",
|
||||
default="agent",
|
||||
help="기술을 위한 에이전트 식별자 (기본값: agent)",
|
||||
help="Agent identifier for skills (default: agent)",
|
||||
)
|
||||
info_parser.add_argument(
|
||||
"--project",
|
||||
action="store_true",
|
||||
help="프로젝트 기술만 검색",
|
||||
help="Search only in project skills",
|
||||
)
|
||||
return skills_parser
|
||||
|
||||
|
||||
def execute_skills_command(args: argparse.Namespace) -> None:
|
||||
"""파싱된 인수를 기반으로 기술 하위 명령을 실행합니다.
|
||||
"""Execute skills subcommands based on parsed arguments.
|
||||
|
||||
Args:
|
||||
args: skills_command 속성이 있는 파싱된 명령줄 인수
|
||||
args: Parsed command line arguments with skills_command attribute
|
||||
"""
|
||||
# agent 인수 검증
|
||||
# validate agent argument
|
||||
if args.agent:
|
||||
is_valid, error_msg = _validate_name(args.agent)
|
||||
if not is_valid:
|
||||
console.print(f"[bold red]오류:[/bold red] 잘못된 에이전트 이름: {error_msg}")
|
||||
console.print(f"[bold red]Error:[/bold red] Invalid agent name: {error_msg}")
|
||||
console.print(
|
||||
"[dim]에이전트 이름은 영문자, 숫자, 하이픈 및 밑줄만 포함할 수 있습니다.[/dim]",
|
||||
"[dim]Agent names must only contain letters, numbers, hyphens, and underscores.[/dim]",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
return
|
||||
@@ -464,19 +469,19 @@ def execute_skills_command(args: argparse.Namespace) -> None:
|
||||
elif args.skills_command == "info":
|
||||
_info(args.name, agent=args.agent, project=args.project)
|
||||
else:
|
||||
# 하위 명령이 제공되지 않은 경우 도움말 표시
|
||||
console.print("[yellow]기술 하위 명령을 지정하십시오: list, create, 또는 info[/yellow]")
|
||||
console.print("\n[bold]사용법:[/bold]", style=COLORS["primary"])
|
||||
# No subcommand provided, show help
|
||||
console.print("[yellow]Please specify a skills subcommand: list, create, or info[/yellow]")
|
||||
console.print("\n[bold]Usage:[/bold]", style=COLORS["primary"])
|
||||
console.print(" deepagents skills <command> [options]\n")
|
||||
console.print("[bold]사용 가능한 명령:[/bold]", style=COLORS["primary"])
|
||||
console.print(" list 사용 가능한 모든 기술 나열")
|
||||
console.print(" create <name> 새 기술 생성")
|
||||
console.print(" info <name> 기술에 대한 자세한 정보 표시")
|
||||
console.print("\n[bold]예시:[/bold]", style=COLORS["primary"])
|
||||
console.print("[bold]Available commands:[/bold]", style=COLORS["primary"])
|
||||
console.print(" list List all available skills")
|
||||
console.print(" create <name> Create a new skill")
|
||||
console.print(" info <name> Show detailed information about a skill")
|
||||
console.print("\n[bold]Examples:[/bold]", style=COLORS["primary"])
|
||||
console.print(" deepagents skills list")
|
||||
console.print(" deepagents skills create web-research")
|
||||
console.print(" deepagents skills info web-research")
|
||||
console.print("\n[dim]특정 명령에 대한 추가 도움말:[/dim]", style=COLORS["dim"])
|
||||
console.print("\n[dim]For more help on a specific command:[/dim]", style=COLORS["dim"])
|
||||
console.print(" deepagents skills <command> --help", style=COLORS["dim"])
|
||||
|
||||
|
||||
|
||||
@@ -1,319 +1,68 @@
|
||||
"""SKILL.md 파일에서 에이전트 기술을 파싱하고 로드하기 위한 기술 로더.
|
||||
"""Skill loader for CLI commands.
|
||||
|
||||
이 모듈은 YAML frontmatter 파싱을 통해 Anthropic의 에이전트 기술 패턴을 구현합니다.
|
||||
각 기술은 다음을 포함하는 SKILL.md 파일이 있는 디렉토리입니다:
|
||||
- YAML frontmatter (이름, 설명 필수)
|
||||
- 에이전트를 위한 마크다운 지침
|
||||
- 선택적 지원 파일 (스크립트, 설정 등)
|
||||
This module provides filesystem-based skill loading for CLI operations (list, create, info).
|
||||
It wraps the prebuilt middleware functionality from deepagents.middleware.skills and adapts
|
||||
it for direct filesystem access needed by CLI commands.
|
||||
|
||||
SKILL.md 구조 예시:
|
||||
```markdown
|
||||
---
|
||||
name: web-research
|
||||
description: 철저한 웹 조사를 수행하기 위한 구조화된 접근 방식
|
||||
---
|
||||
|
||||
# 웹 조사 기술
|
||||
|
||||
## 사용 시기
|
||||
- 사용자가 주제 조사를 요청할 때
|
||||
...
|
||||
```
|
||||
For middleware usage within agents, use deepagents.middleware.skills.SkillsMiddleware directly.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
from typing import TYPE_CHECKING, NotRequired, TypedDict
|
||||
from pathlib import Path
|
||||
|
||||
import yaml
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# SKILL.md 파일의 최대 크기 (10MB)
|
||||
MAX_SKILL_FILE_SIZE = 10 * 1024 * 1024
|
||||
|
||||
# Agent Skills 사양 제약 조건 (https://agentskills.io/specification)
|
||||
MAX_SKILL_NAME_LENGTH = 64
|
||||
MAX_SKILL_DESCRIPTION_LENGTH = 1024
|
||||
from deepagents.backends.filesystem import FilesystemBackend
|
||||
from deepagents.middleware.skills import SkillMetadata
|
||||
from deepagents.middleware.skills import _list_skills as list_skills_from_backend
|
||||
|
||||
|
||||
class SkillMetadata(TypedDict):
|
||||
"""Agent Skills 사양(https://agentskills.io/specification)에 따른 기술 메타데이터."""
|
||||
|
||||
name: str
|
||||
"""기술 이름 (최대 64자, 소문자 영숫자와 하이픈)."""
|
||||
|
||||
description: str
|
||||
"""기술이 수행하는 작업에 대한 설명 (최대 1024자)."""
|
||||
|
||||
path: str
|
||||
"""SKILL.md 파일 경로."""
|
||||
class ExtendedSkillMetadata(SkillMetadata):
|
||||
"""Extended skill metadata for CLI display, adds source tracking."""
|
||||
|
||||
source: str
|
||||
"""기술의 출처 ('user' 또는 'project')."""
|
||||
|
||||
# Agent Skills 사양에 따른 선택적 필드
|
||||
license: NotRequired[str | None]
|
||||
"""라이선스 이름 또는 번들로 제공되는 라이선스 파일에 대한 참조."""
|
||||
|
||||
compatibility: NotRequired[str | None]
|
||||
"""환경 요구 사항 (최대 500자)."""
|
||||
|
||||
metadata: NotRequired[dict[str, str] | None]
|
||||
"""추가 메타데이터를 위한 임의의 키-값 매핑."""
|
||||
|
||||
allowed_tools: NotRequired[str | None]
|
||||
"""사전 승인된 도구의 공백으로 구분된 목록."""
|
||||
|
||||
|
||||
def _is_safe_path(path: Path, base_dir: Path) -> bool:
|
||||
"""경로가 base_dir 내에 안전하게 포함되어 있는지 확인합니다.
|
||||
# Re-export for CLI commands
|
||||
__all__ = ["SkillMetadata", "list_skills"]
|
||||
|
||||
심볼릭 링크나 경로 조작을 통한 디렉토리 탐색 공격을 방지합니다.
|
||||
이 함수는 두 경로를 정식 형식(심볼릭 링크 따름)으로 해결하고,
|
||||
대상 경로가 기본 디렉토리 내에 있는지 확인합니다.
|
||||
|
||||
def list_skills(
|
||||
*, user_skills_dir: Path | None = None, project_skills_dir: Path | None = None
|
||||
) -> list[ExtendedSkillMetadata]:
|
||||
"""List skills from user and/or project directories.
|
||||
|
||||
This is a CLI-specific wrapper around the prebuilt middleware's skill loading
|
||||
functionality. It uses FilesystemBackend to load skills from local directories.
|
||||
|
||||
When both directories are provided, project skills with the same name as
|
||||
user skills will override them (project skills take precedence).
|
||||
|
||||
Args:
|
||||
path: 검증할 경로
|
||||
base_dir: 경로를 포함해야 하는 기본 디렉토리
|
||||
user_skills_dir: Path to the user-level skills directory.
|
||||
project_skills_dir: Path to the project-level skills directory.
|
||||
|
||||
Returns:
|
||||
경로가 base_dir 내에 안전하게 있으면 True, 그렇지 않으면 False
|
||||
|
||||
예시:
|
||||
>>> base = Path("/home/user/.deepagents/skills")
|
||||
>>> safe = Path("/home/user/.deepagents/skills/web-research/SKILL.md")
|
||||
>>> unsafe = Path("/home/user/.deepagents/skills/../../.ssh/id_rsa")
|
||||
>>> _is_safe_path(safe, base)
|
||||
True
|
||||
>>> _is_safe_path(unsafe, base)
|
||||
False
|
||||
Merged list of skill metadata from both sources, with project skills
|
||||
taking precedence over user skills when names conflict.
|
||||
"""
|
||||
try:
|
||||
# 두 경로를 정식 형식으로 해결 (심볼릭 링크 따름)
|
||||
resolved_path = path.resolve()
|
||||
resolved_base = base_dir.resolve()
|
||||
all_skills: dict[str, ExtendedSkillMetadata] = {}
|
||||
|
||||
# 해결된 경로가 기본 디렉토리 내에 있는지 확인
|
||||
# 이는 기본 디렉토리 외부를 가리키는 심볼릭 링크를 포착함
|
||||
resolved_path.relative_to(resolved_base)
|
||||
return True
|
||||
except ValueError:
|
||||
# 경로가 base_dir의 하위가 아님 (디렉토리 외부)
|
||||
return False
|
||||
except (OSError, RuntimeError):
|
||||
# 경로 해결 중 오류 발생 (예: 순환 심볼릭 링크, 너무 많은 수준)
|
||||
return False
|
||||
|
||||
|
||||
def _validate_skill_name(name: str, directory_name: str) -> tuple[bool, str]:
|
||||
"""Agent Skills 사양에 따라 기술 이름을 검증합니다.
|
||||
|
||||
요구 사항:
|
||||
- 최대 64자
|
||||
- 소문자 영숫자와 하이픈만 허용 (a-z, 0-9, -)
|
||||
- 하이픈으로 시작하거나 끝날 수 없음
|
||||
- 연속된 하이픈 허용 안 함
|
||||
- 상위 디렉토리 이름과 일치해야 함
|
||||
|
||||
Args:
|
||||
name: YAML frontmatter의 기술 이름.
|
||||
directory_name: 상위 디렉토리 이름.
|
||||
|
||||
Returns:
|
||||
(유효 여부, 오류 메시지) 튜플. 유효한 경우 오류 메시지는 비어 있습니다.
|
||||
"""
|
||||
if not name:
|
||||
return False, "이름은 필수입니다"
|
||||
if len(name) > MAX_SKILL_NAME_LENGTH:
|
||||
return False, "이름이 64자를 초과합니다"
|
||||
# 패턴: 소문자 영숫자, 세그먼트 사이의 단일 하이픈, 시작/종료 하이픈 없음
|
||||
if not re.match(r"^[a-z0-9]+(-[a-z0-9]+)*$", name):
|
||||
return False, "이름은 소문자 영숫자와 단일 하이픈만 사용해야 합니다"
|
||||
if name != directory_name:
|
||||
return False, f"이름 '{name}'은 디렉토리 이름 '{directory_name}'과 일치해야 합니다"
|
||||
return True, ""
|
||||
|
||||
|
||||
def _parse_skill_metadata(skill_md_path: Path, source: str) -> SkillMetadata | None:
|
||||
"""Agent Skills 사양에 따라 SKILL.md 파일에서 YAML frontmatter를 파싱합니다.
|
||||
|
||||
Args:
|
||||
skill_md_path: SKILL.md 파일 경로.
|
||||
source: 기술 출처 ('user' 또는 'project').
|
||||
|
||||
Returns:
|
||||
모든 필드가 포함된 SkillMetadata, 파싱 실패 시 None.
|
||||
"""
|
||||
try:
|
||||
# 보안: DoS 공격 방지를 위해 파일 크기 확인
|
||||
file_size = skill_md_path.stat().st_size
|
||||
if file_size > MAX_SKILL_FILE_SIZE:
|
||||
logger.warning("건너뛰는 중 %s: 파일이 너무 큼 (%d 바이트)", skill_md_path, file_size)
|
||||
return None
|
||||
|
||||
content = skill_md_path.read_text(encoding="utf-8")
|
||||
|
||||
# --- 구분 기호 사이의 YAML frontmatter 매칭
|
||||
frontmatter_pattern = r"^---\s*\n(.*?)\n---\s*\n"
|
||||
match = re.match(frontmatter_pattern, content, re.DOTALL)
|
||||
|
||||
if not match:
|
||||
logger.warning("건너뛰는 중 %s: 유효한 YAML frontmatter를 찾을 수 없음", skill_md_path)
|
||||
return None
|
||||
|
||||
frontmatter_str = match.group(1)
|
||||
|
||||
# 적절한 중첩 구조 지원을 위해 safe_load를 사용하여 YAML 파싱
|
||||
try:
|
||||
frontmatter_data = yaml.safe_load(frontmatter_str)
|
||||
except yaml.YAMLError as e:
|
||||
logger.warning("%s의 잘못된 YAML: %s", skill_md_path, e)
|
||||
return None
|
||||
|
||||
if not isinstance(frontmatter_data, dict):
|
||||
logger.warning("건너뛰는 중 %s: frontmatter가 매핑이 아님", skill_md_path)
|
||||
return None
|
||||
|
||||
# 필수 필드 검증
|
||||
name = frontmatter_data.get("name")
|
||||
description = frontmatter_data.get("description")
|
||||
|
||||
if not name or not description:
|
||||
logger.warning("건너뛰는 중 %s: 필수 'name' 또는 'description'이 누락됨", skill_md_path)
|
||||
return None
|
||||
|
||||
# 사양에 따라 이름 형식 검증 (경고하지만 하위 호환성을 위해 로드함)
|
||||
directory_name = skill_md_path.parent.name
|
||||
is_valid, error = _validate_skill_name(str(name), directory_name)
|
||||
if not is_valid:
|
||||
logger.warning(
|
||||
"%s의 '%s' 기술이 Agent Skills 사양을 따르지 않음: %s. "
|
||||
"사양을 준수하도록 이름을 변경하는 것을 고려하십시오.",
|
||||
skill_md_path,
|
||||
name,
|
||||
error,
|
||||
)
|
||||
|
||||
# 설명 길이 검증 (사양: 최대 1024자)
|
||||
description_str = str(description)
|
||||
if len(description_str) > MAX_SKILL_DESCRIPTION_LENGTH:
|
||||
logger.warning(
|
||||
"%s의 설명이 %d자를 초과하여 잘림",
|
||||
skill_md_path,
|
||||
MAX_SKILL_DESCRIPTION_LENGTH,
|
||||
)
|
||||
description_str = description_str[:MAX_SKILL_DESCRIPTION_LENGTH]
|
||||
|
||||
return SkillMetadata(
|
||||
name=str(name),
|
||||
description=description_str,
|
||||
path=str(skill_md_path),
|
||||
source=source,
|
||||
license=frontmatter_data.get("license"),
|
||||
compatibility=frontmatter_data.get("compatibility"),
|
||||
metadata=frontmatter_data.get("metadata"),
|
||||
allowed_tools=frontmatter_data.get("allowed-tools"),
|
||||
)
|
||||
|
||||
except (OSError, UnicodeDecodeError) as e:
|
||||
logger.warning("%s 읽기 오류: %s", skill_md_path, e)
|
||||
return None
|
||||
|
||||
|
||||
def _list_skills(skills_dir: Path, source: str) -> list[SkillMetadata]:
|
||||
"""단일 기술 디렉토리에서 모든 기술을 나열합니다(내부 헬퍼).
|
||||
|
||||
기술 디렉토리에서 SKILL.md 파일이 포함된 하위 디렉토리를 스캔하고,
|
||||
YAML frontmatter를 파싱하여 기술 메타데이터를 반환합니다.
|
||||
|
||||
기술 조직 구성:
|
||||
skills/
|
||||
├── skill-name/
|
||||
│ ├── SKILL.md # 필수: YAML frontmatter가 있는 지침
|
||||
│ ├── script.py # 선택 사항: 지원 파일
|
||||
│ └── config.json # 선택 사항: 지원 파일
|
||||
|
||||
Args:
|
||||
skills_dir: 기술 디렉토리 경로.
|
||||
source: 기술 출처 ('user' 또는 'project').
|
||||
|
||||
Returns:
|
||||
이름, 설명, 경로 및 출처가 포함된 기술 메타데이터 딕셔너리 목록.
|
||||
"""
|
||||
# 기술 디렉토리 존재 여부 확인
|
||||
skills_dir = skills_dir.expanduser()
|
||||
if not skills_dir.exists():
|
||||
return []
|
||||
|
||||
# 보안 검사를 위해 기본 디렉토리를 정식 경로로 해결
|
||||
try:
|
||||
resolved_base = skills_dir.resolve()
|
||||
except (OSError, RuntimeError):
|
||||
# 기본 디렉토리를 해결할 수 없음, 안전하게 종료
|
||||
return []
|
||||
|
||||
skills: list[SkillMetadata] = []
|
||||
|
||||
# 하위 디렉토리 순회
|
||||
for skill_dir in skills_dir.iterdir():
|
||||
# 보안: 기술 디렉토리 외부를 가리키는 심볼릭 링크 포착
|
||||
if not _is_safe_path(skill_dir, resolved_base):
|
||||
continue
|
||||
|
||||
if not skill_dir.is_dir():
|
||||
continue
|
||||
|
||||
# SKILL.md 파일 찾기
|
||||
skill_md_path = skill_dir / "SKILL.md"
|
||||
if not skill_md_path.exists():
|
||||
continue
|
||||
|
||||
# 보안: 읽기 전에 SKILL.md 경로가 안전한지 검증
|
||||
# 이는 외부를 가리키는 심볼릭 링크인 SKILL.md 파일을 포착함
|
||||
if not _is_safe_path(skill_md_path, resolved_base):
|
||||
continue
|
||||
|
||||
# 메타데이터 파싱
|
||||
metadata = _parse_skill_metadata(skill_md_path, source=source)
|
||||
if metadata:
|
||||
skills.append(metadata)
|
||||
|
||||
return skills
|
||||
|
||||
|
||||
def list_skills(*, user_skills_dir: Path | None = None, project_skills_dir: Path | None = None) -> list[SkillMetadata]:
    """List skills from the user and/or project directories.

    When both directories are provided, a project skill whose name matches a
    user skill overrides the user skill.

    Args:
        user_skills_dir: Path to the user-level skills directory.
        project_skills_dir: Path to the project-level skills directory.

    Returns:
        Merged list of skill metadata from both sources; on a name clash the
        project skill takes precedence over the user skill.
    """
    # Keyed by skill name so later entries (project) override earlier (user).
    all_skills: dict[str, SkillMetadata] = {}

    # Load user skills first (the foundation layer). _list_skills already
    # stamps each entry with its source ('user' / 'project').
    if user_skills_dir:
        for skill in _list_skills(user_skills_dir, source="user"):
            all_skills[skill["name"]] = skill

    # Load project skills second (override/augment): same-named project
    # skills replace the user skills loaded above.
    if project_skills_dir:
        for skill in _list_skills(project_skills_dir, source="project"):
            all_skills[skill["name"]] = skill

    return list(all_skills.values())
|
||||
|
||||
@@ -1,273 +0,0 @@
|
||||
"""에이전트 기술을 시스템 프롬프트에 로드하고 노출하기 위한 미들웨어.
|
||||
|
||||
이 미들웨어는 점진적 노출(progressive disclosure)을 통해 Anthropic의 "Agent Skills" 패턴을 구현합니다:
|
||||
1. 세션 시작 시 SKILL.md 파일에서 YAML frontmatter 파싱
|
||||
2. 시스템 프롬프트에 기술 메타데이터(이름 + 설명) 주입
|
||||
3. 에이전트는 작업과 관련이 있을 때 SKILL.md의 전체 내용을 읽음
|
||||
|
||||
기술 디렉토리 구조 (에이전트별 + 프로젝트):
|
||||
사용자 수준: ~/.deepagents/{AGENT_NAME}/skills/
|
||||
프로젝트 수준: {PROJECT_ROOT}/.deepagents/skills/
|
||||
|
||||
구조 예시:
|
||||
~/.deepagents/{AGENT_NAME}/skills/
|
||||
├── web-research/
|
||||
│ ├── SKILL.md # 필수: YAML frontmatter + 지침
|
||||
│ └── helper.py # 선택 사항: 지원 파일
|
||||
├── code-review/
|
||||
│ ├── SKILL.md
|
||||
│ └── checklist.md
|
||||
|
||||
.deepagents/skills/
|
||||
├── project-specific/
|
||||
│ └── SKILL.md # 프로젝트 전용 기술
|
||||
"""
|
||||
|
||||
from collections.abc import Awaitable, Callable
|
||||
from pathlib import Path
|
||||
from typing import NotRequired, TypedDict, cast
|
||||
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
AgentState,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
)
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from deepagents_cli.skills.load import SkillMetadata, list_skills
|
||||
|
||||
|
||||
class SkillsState(AgentState):
    """Agent state extended with the fields used by the skills middleware."""

    skills_metadata: NotRequired[list[SkillMetadata]]
    """List of loaded skill metadata (name, description, path)."""
|
||||
|
||||
|
||||
class SkillsStateUpdate(TypedDict):
    """State update emitted by the skills middleware."""

    skills_metadata: list[SkillMetadata]
    """List of loaded skill metadata (name, description, path)."""
|
||||
|
||||
|
||||
# Skills-system documentation injected into the agent's system prompt.
# NOTE: the template body below is runtime prompt text (intentionally Korean)
# and must not be edited for translation. It is rendered with str.format and
# has exactly two placeholders: {skills_locations} and {skills_list}.
SKILLS_SYSTEM_PROMPT = """

## 기술 시스템 (Skills System)

당신은 전문적인 능력과 도메인 지식을 제공하는 기술 라이브러리에 접근할 수 있습니다.

{skills_locations}

**사용 가능한 기술:**

{skills_list}

**기술 사용 방법 (점진적 노출):**

기술은 **점진적 노출(progressive disclosure)** 패턴을 따릅니다. 당신은 기술이 존재한다는 것(위의 이름 + 설명)은 알고 있지만, 필요할 때만 전체 지침을 읽습니다:

1. **기술이 적용되는 시기 파악**: 사용자의 작업이 기술의 설명과 일치하는지 확인하십시오.
2. **기술의 전체 지침 읽기**: 위의 기술 목록은 read_file과 함께 사용할 정확한 경로를 보여줍니다.
3. **기술의 지침 따르기**: SKILL.md에는 단계별 워크플로우, 권장 사항 및 예시가 포함되어 있습니다.
4. **지원 파일 접근**: 기술에는 Python 스크립트, 설정 또는 참조 문서가 포함될 수 있습니다. 절대 경로를 사용하십시오.

**기술을 사용해야 하는 경우:**
- 사용자의 요청이 기술의 도메인과 일치할 때 (예: "X 조사해줘" → web-research 기술)
- 전문 지식이나 구조화된 워크플로우가 필요할 때
- 기술이 복잡한 작업에 대해 검증된 패턴을 제공할 때

**기술은 자체 문서화됨:**
- 각 SKILL.md는 기술이 수행하는 작업과 사용 방법을 정확하게 알려줍니다.
- 위의 기술 목록은 각 기술의 SKILL.md 파일에 대한 전체 경로를 보여줍니다.

**기술 스크립트 실행:**
기술에는 Python 스크립트나 기타 실행 파일이 포함될 수 있습니다. 항상 기술 목록의 절대 경로를 사용하십시오.

**워크플로우 예시:**

사용자: "양자 컴퓨팅의 최신 개발 동향을 조사해 줄 수 있어?"

1. 위에서 사용 가능한 기술 확인 → 전체 경로와 함께 "web-research" 기술 확인
2. 목록에 표시된 경로를 사용하여 기술 읽기
3. 기술의 조사 워크플로우 따르기 (조사 → 정리 → 합성)
4. 절대 경로와 함께 헬퍼 스크립트 사용

주의: 기술은 당신을 더 유능하고 일관성 있게 만드는 도구입니다. 의심스러울 때는 해당 작업에 대한 기술이 있는지 확인하십시오!
"""
|
||||
|
||||
|
||||
class SkillsMiddleware(AgentMiddleware):
    """Middleware that loads agent skills and exposes them in the system prompt.

    Implements Anthropic's Agent Skills pattern with progressive disclosure:
    - Loads skill metadata (name, description) from SKILL.md YAML frontmatter
      at session start.
    - Injects the skill list into the system prompt for discoverability.
    - The agent reads the full SKILL.md content only when a skill is relevant.

    Supports both user-level and project-level skills:
    - User skills: ~/.deepagents/{AGENT_NAME}/skills/
    - Project skills: {PROJECT_ROOT}/.deepagents/skills/
    - A project skill overrides a user skill with the same name.

    Args:
        skills_dir: Path to the user-level skills directory (per agent).
        assistant_id: Agent identifier used for path references in the prompt.
        project_skills_dir: Optional path to the project-level skills directory.
    """

    state_schema = SkillsState

    def __init__(
        self,
        *,
        skills_dir: str | Path,
        assistant_id: str,
        project_skills_dir: str | Path | None = None,
    ) -> None:
        """Initialize the skills middleware.

        Args:
            skills_dir: Path to the user-level skills directory.
            assistant_id: Agent identifier.
            project_skills_dir: Optional path to the project-level skills directory.
        """
        self.skills_dir = Path(skills_dir).expanduser()
        self.assistant_id = assistant_id
        self.project_skills_dir = Path(project_skills_dir).expanduser() if project_skills_dir else None
        # Pre-computed ~-form path used only for prompt display.
        self.user_skills_display = f"~/.deepagents/{assistant_id}/skills"
        self.system_prompt_template = SKILLS_SYSTEM_PROMPT

    def _format_skills_locations(self) -> str:
        """Format the skill directory locations for the system prompt."""
        locations = [f"**사용자 기술**: `{self.user_skills_display}`"]
        if self.project_skills_dir:
            locations.append(f"**프로젝트 기술**: `{self.project_skills_dir}` (사용자 기술을 오버라이드함)")
        return "\n".join(locations)

    def _format_skills_list(self, skills: list[SkillMetadata]) -> str:
        """Format the skill metadata entries for the system prompt."""
        if not skills:
            # No skills: point at the directories where skills may be created.
            locations = [f"{self.user_skills_display}/"]
            if self.project_skills_dir:
                locations.append(f"{self.project_skills_dir}/")
            return f"(현재 사용 가능한 기술이 없습니다. {' 또는 '.join(locations)} 에 기술을 생성할 수 있습니다)"

        # Group skills by their origin.
        user_skills = [s for s in skills if s["source"] == "user"]
        project_skills = [s for s in skills if s["source"] == "project"]

        lines = []

        # User skills section.
        if user_skills:
            lines.append("**사용자 기술:**")
            for skill in user_skills:
                lines.append(f"- **{skill['name']}**: {skill['description']}")
                lines.append(f"  → 전체 지침을 보려면 `{skill['path']}` 읽기")
            lines.append("")

        # Project skills section.
        if project_skills:
            lines.append("**프로젝트 기술:**")
            for skill in project_skills:
                lines.append(f"- **{skill['name']}**: {skill['description']}")
                lines.append(f"  → 전체 지침을 보려면 `{skill['path']}` 읽기")

        return "\n".join(lines)

    def _render_skills_section(self, request: ModelRequest) -> str:
        """Render the skills documentation block for this request's state.

        Shared by the sync and async model-call wrappers so the prompt is
        built identically on both paths.
        """
        # Skill metadata is populated by before_agent; default to no skills.
        skills_metadata = request.state.get("skills_metadata", [])
        return self.system_prompt_template.format(
            skills_locations=self._format_skills_locations(),
            skills_list=self._format_skills_list(skills_metadata),
        )

    @staticmethod
    def _append_to_system_prompt(existing: str | None, skills_section: str) -> str:
        """Append the skills section to an existing system prompt, if any."""
        return existing + "\n\n" + skills_section if existing else skills_section

    def before_agent(self, state: SkillsState, runtime: Runtime) -> SkillsStateUpdate | None:
        """Load skill metadata before the agent runs.

        Skills are re-listed on every interaction with the agent so changes
        to the skill directories are picked up.

        Args:
            state: Current agent state.
            runtime: Runtime context.

        Returns:
            A state update with skills_metadata populated.
        """
        skills = list_skills(
            user_skills_dir=self.skills_dir,
            project_skills_dir=self.project_skills_dir,
        )
        return SkillsStateUpdate(skills_metadata=skills)

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        """Inject the skills documentation into the system prompt.

        Runs on every model call so the skill information is always available.

        Args:
            request: The model request being processed.
            handler: Handler to invoke with the modified request.

        Returns:
            The model response from the handler.
        """
        skills_section = self._render_skills_section(request)
        system_prompt = self._append_to_system_prompt(request.system_prompt, skills_section)
        return handler(request.override(system_prompt=system_prompt))

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        """(Async) Inject the skills documentation into the system prompt.

        Args:
            request: The model request being processed.
            handler: Handler to invoke with the modified request.

        Returns:
            The model response from the handler.
        """
        skills_section = self._render_skills_section(request)
        system_prompt = self._append_to_system_prompt(request.system_prompt, skills_section)
        return await handler(request.override(system_prompt=system_prompt))
|
||||
@@ -0,0 +1,594 @@
|
||||
"""Textual UI adapter for agent execution."""
|
||||
# ruff: noqa: PLR0912, PLR0915, ANN401, PLR2004, BLE001, TRY203
|
||||
# This module has complex streaming logic ported from execution.py
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from datetime import UTC, datetime
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from langchain.agents.middleware.human_in_the_loop import (
|
||||
ActionRequest,
|
||||
HITLRequest,
|
||||
HITLResponse,
|
||||
)
|
||||
from langchain_core.messages import HumanMessage, ToolMessage
|
||||
from langgraph.types import Command, Interrupt
|
||||
from pydantic import TypeAdapter, ValidationError
|
||||
|
||||
from deepagents_cli.file_ops import FileOpTracker
|
||||
from deepagents_cli.image_utils import create_multimodal_content
|
||||
from deepagents_cli.input import ImageTracker, parse_file_mentions
|
||||
from deepagents_cli.ui import format_tool_display, format_tool_message_content
|
||||
from deepagents_cli.widgets.messages import (
|
||||
AssistantMessage,
|
||||
DiffMessage,
|
||||
ErrorMessage,
|
||||
SystemMessage,
|
||||
ToolCallMessage,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Callable
|
||||
|
||||
_HITL_REQUEST_ADAPTER = TypeAdapter(HITLRequest)
|
||||
|
||||
|
||||
class TextualUIAdapter:
    """Adapter for rendering agent output to Textual widgets.

    Sits between the agent execution loop and the Textual UI: the executor
    calls these injected callbacks instead of touching Textual directly, so
    streaming output can be rendered as message widgets.
    """

    def __init__(
        self,
        mount_message: Callable,
        update_status: Callable[[str], None],
        request_approval: Callable,  # async callable returning Future
        on_auto_approve_enabled: Callable[[], None] | None = None,
        scroll_to_bottom: Callable[[], None] | None = None,
    ) -> None:
        """Initialize the adapter.

        Args:
            mount_message: Async callable that mounts a message widget
            update_status: Callable that updates the status bar message
            request_approval: Callable returning a Future for HITL approval
            on_auto_approve_enabled: Callback fired when auto-approve turns on
            scroll_to_bottom: Callback that scrolls the chat to the bottom
        """
        # UI callbacks supplied by the Textual app.
        self._mount_message = mount_message
        self._update_status = update_status
        self._request_approval = request_approval
        self._on_auto_approve_enabled = on_auto_approve_enabled
        self._scroll_to_bottom = scroll_to_bottom

        # Streaming state shared with the executor.
        self._current_assistant_message: AssistantMessage | None = None
        self._current_tool_messages: dict[str, ToolCallMessage] = {}
        self._pending_text: str = ""
        self._token_tracker: Any = None

    def set_token_tracker(self, tracker: Any) -> None:
        """Attach the token tracker used for usage accounting."""
        self._token_tracker = tracker
|
||||
|
||||
|
||||
async def execute_task_textual(
    user_input: str,
    agent: Any,
    assistant_id: str | None,
    session_state: Any,
    adapter: TextualUIAdapter,
    backend: Any = None,
    image_tracker: ImageTracker | None = None,
) -> None:
    """Execute a task with output directed to Textual UI.

    This is the Textual-compatible version of execute_task() that uses
    the TextualUIAdapter for all UI operations. It streams agent output,
    renders assistant text and tool calls as widgets, and handles
    human-in-the-loop (HITL) approval interrupts, resuming the stream
    until the run completes, is rejected, or is interrupted.

    Args:
        user_input: The user's input message
        agent: The LangGraph agent to execute
        assistant_id: The agent identifier
        session_state: Session state with auto_approve flag
        adapter: The TextualUIAdapter for UI operations
        backend: Optional backend for file operations
        image_tracker: Optional tracker for images
    """
    # Parse file mentions and inject content if any
    prompt_text, mentioned_files = parse_file_mentions(user_input)

    # Max file size to embed inline (256KB, matching mistral-vibe)
    # Larger files get a reference instead - use read_file tool to view them
    max_embed_bytes = 256 * 1024

    if mentioned_files:
        context_parts = [prompt_text, "\n\n## Referenced Files\n"]
        for file_path in mentioned_files:
            try:
                file_size = file_path.stat().st_size
                if file_size > max_embed_bytes:
                    # File too large - include reference instead of content
                    size_kb = file_size // 1024
                    context_parts.append(
                        f"\n### {file_path.name}\n"
                        f"Path: `{file_path}`\n"
                        f"Size: {size_kb}KB (too large to embed, use read_file tool to view)"
                    )
                else:
                    content = file_path.read_text()
                    context_parts.append(
                        f"\n### {file_path.name}\nPath: `{file_path}`\n```\n{content}\n```"
                    )
            except Exception as e:
                context_parts.append(f"\n### {file_path.name}\n[Error reading file: {e}]")
        final_input = "\n".join(context_parts)
    else:
        final_input = prompt_text

    # Include images in the message content
    images_to_send = []
    if image_tracker:
        images_to_send = image_tracker.get_images()
    if images_to_send:
        message_content = create_multimodal_content(final_input, images_to_send)
    else:
        message_content = final_input

    thread_id = session_state.thread_id
    config = {
        "configurable": {"thread_id": thread_id},
        "metadata": {
            "assistant_id": assistant_id,
            "agent_name": assistant_id,
            "updated_at": datetime.now(UTC).isoformat(),
        }
        if assistant_id
        else {},
    }

    captured_input_tokens = 0
    captured_output_tokens = 0

    # Update status to show thinking
    adapter._update_status("Agent is thinking...")

    file_op_tracker = FileOpTracker(assistant_id=assistant_id, backend=backend)
    displayed_tool_ids: set[str] = set()
    tool_call_buffers: dict[str | int, dict] = {}

    # Track pending text and assistant messages PER NAMESPACE to avoid interleaving
    # when multiple subagents stream in parallel
    pending_text_by_namespace: dict[tuple, str] = {}
    assistant_message_by_namespace: dict[tuple, Any] = {}

    # Clear images from tracker after creating the message
    if image_tracker:
        image_tracker.clear()

    stream_input: dict | Command = {"messages": [{"role": "user", "content": message_content}]}

    try:
        while True:
            interrupt_occurred = False
            hitl_response: dict[str, HITLResponse] = {}
            suppress_resumed_output = False
            pending_interrupts: dict[str, HITLRequest] = {}

            async for chunk in agent.astream(
                stream_input,
                stream_mode=["messages", "updates"],
                subgraphs=True,
                config=config,
                durability="exit",
            ):
                if not isinstance(chunk, tuple) or len(chunk) != 3:
                    continue

                namespace, current_stream_mode, data = chunk

                # Convert namespace to hashable tuple for dict keys
                ns_key = tuple(namespace) if namespace else ()

                # Filter out subagent outputs - only show main agent (empty namespace)
                # Subagents run via Task tool and should only report back to the main agent
                is_main_agent = ns_key == ()

                # Handle UPDATES stream - for interrupts and todos
                if current_stream_mode == "updates":
                    if not isinstance(data, dict):
                        continue

                    # Check for interrupts
                    if "__interrupt__" in data:
                        interrupts: list[Interrupt] = data["__interrupt__"]
                        if interrupts:
                            for interrupt_obj in interrupts:
                                try:
                                    validated_request = _HITL_REQUEST_ADAPTER.validate_python(
                                        interrupt_obj.value
                                    )
                                    pending_interrupts[interrupt_obj.id] = validated_request
                                    interrupt_occurred = True
                                except ValidationError:
                                    # Deliberate re-raise: a malformed interrupt
                                    # payload is a programming error and must surface.
                                    raise

                    # Check for todo updates (not yet implemented in Textual UI)
                    chunk_data = next(iter(data.values())) if data else None
                    if chunk_data and isinstance(chunk_data, dict) and "todos" in chunk_data:
                        pass  # Future: render todo list widget

                # Handle MESSAGES stream - for content and tool calls
                elif current_stream_mode == "messages":
                    # Skip subagent outputs - only render main agent content in chat
                    if not is_main_agent:
                        continue

                    if not isinstance(data, tuple) or len(data) != 2:
                        continue

                    message, _metadata = data

                    if isinstance(message, HumanMessage):
                        content = message.text
                        # Flush pending text for this namespace
                        pending_text = pending_text_by_namespace.get(ns_key, "")
                        if content and pending_text:
                            await _flush_assistant_text_ns(
                                adapter, pending_text, ns_key, assistant_message_by_namespace
                            )
                            pending_text_by_namespace[ns_key] = ""
                        continue

                    if isinstance(message, ToolMessage):
                        tool_name = getattr(message, "name", "")
                        tool_status = getattr(message, "status", "success")
                        tool_content = format_tool_message_content(message.content)
                        record = file_op_tracker.complete_with_message(message)

                        adapter._update_status("Agent is thinking...")

                        # Update tool call status with output
                        tool_id = getattr(message, "tool_call_id", None)
                        if tool_id and tool_id in adapter._current_tool_messages:
                            tool_msg = adapter._current_tool_messages[tool_id]
                            output_str = str(tool_content) if tool_content else ""
                            if tool_status == "success":
                                tool_msg.set_success(output_str)
                            else:
                                tool_msg.set_error(output_str or "Error")
                            # Clean up - remove from tracking dict after status update
                            del adapter._current_tool_messages[tool_id]

                        # Show shell errors
                        if tool_name == "shell" and tool_status != "success":
                            pending_text = pending_text_by_namespace.get(ns_key, "")
                            if pending_text:
                                await _flush_assistant_text_ns(
                                    adapter, pending_text, ns_key, assistant_message_by_namespace
                                )
                                pending_text_by_namespace[ns_key] = ""
                            if tool_content:
                                await adapter._mount_message(ErrorMessage(str(tool_content)))

                        # Show file operation results - always show diffs in chat
                        if record:
                            pending_text = pending_text_by_namespace.get(ns_key, "")
                            if pending_text:
                                await _flush_assistant_text_ns(
                                    adapter, pending_text, ns_key, assistant_message_by_namespace
                                )
                                pending_text_by_namespace[ns_key] = ""
                            if record.diff:
                                await adapter._mount_message(
                                    DiffMessage(record.diff, record.display_path)
                                )
                        continue

                    # Check if this is an AIMessageChunk
                    if not hasattr(message, "content_blocks"):
                        continue

                    # Extract token usage
                    if adapter._token_tracker and hasattr(message, "usage_metadata"):
                        usage = message.usage_metadata
                        if usage:
                            input_toks = usage.get("input_tokens", 0)
                            output_toks = usage.get("output_tokens", 0)
                            if input_toks or output_toks:
                                captured_input_tokens = max(captured_input_tokens, input_toks)
                                captured_output_tokens = max(captured_output_tokens, output_toks)

                    # Process content blocks
                    for block in message.content_blocks:
                        block_type = block.get("type")

                        if block_type == "text":
                            text = block.get("text", "")
                            if text:
                                # Track accumulated text for reference
                                pending_text = pending_text_by_namespace.get(ns_key, "")
                                pending_text += text
                                pending_text_by_namespace[ns_key] = pending_text

                                # Get or create assistant message for this namespace
                                current_msg = assistant_message_by_namespace.get(ns_key)
                                if current_msg is None:
                                    current_msg = AssistantMessage()
                                    await adapter._mount_message(current_msg)
                                    assistant_message_by_namespace[ns_key] = current_msg
                                    # Anchor scroll once when message is created
                                    # anchor() keeps scroll locked to bottom as content grows
                                    if adapter._scroll_to_bottom:
                                        adapter._scroll_to_bottom()

                                # Append just the new text chunk for smoother streaming
                                # (uses MarkdownStream internally for better performance)
                                await current_msg.append_content(text)

                        elif block_type in ("tool_call_chunk", "tool_call"):
                            chunk_name = block.get("name")
                            chunk_args = block.get("args")
                            chunk_id = block.get("id")
                            chunk_index = block.get("index")

                            buffer_key: str | int
                            if chunk_index is not None:
                                buffer_key = chunk_index
                            elif chunk_id is not None:
                                buffer_key = chunk_id
                            else:
                                buffer_key = f"unknown-{len(tool_call_buffers)}"

                            buffer = tool_call_buffers.setdefault(
                                buffer_key,
                                {"name": None, "id": None, "args": None, "args_parts": []},
                            )

                            if chunk_name:
                                buffer["name"] = chunk_name
                            if chunk_id:
                                buffer["id"] = chunk_id

                            if isinstance(chunk_args, dict):
                                buffer["args"] = chunk_args
                                buffer["args_parts"] = []
                            elif isinstance(chunk_args, str):
                                if chunk_args:
                                    parts: list[str] = buffer.setdefault("args_parts", [])
                                    if not parts or chunk_args != parts[-1]:
                                        parts.append(chunk_args)
                                        buffer["args"] = "".join(parts)
                            elif chunk_args is not None:
                                buffer["args"] = chunk_args

                            buffer_name = buffer.get("name")
                            buffer_id = buffer.get("id")
                            if buffer_name is None:
                                continue

                            parsed_args = buffer.get("args")
                            if isinstance(parsed_args, str):
                                if not parsed_args:
                                    continue
                                try:
                                    parsed_args = json.loads(parsed_args)
                                except json.JSONDecodeError:
                                    # Arguments still streaming - wait for more chunks.
                                    continue
                            elif parsed_args is None:
                                continue

                            if not isinstance(parsed_args, dict):
                                parsed_args = {"value": parsed_args}

                            # Flush pending text before tool call
                            pending_text = pending_text_by_namespace.get(ns_key, "")
                            if pending_text:
                                await _flush_assistant_text_ns(
                                    adapter, pending_text, ns_key, assistant_message_by_namespace
                                )
                                pending_text_by_namespace[ns_key] = ""
                                assistant_message_by_namespace.pop(ns_key, None)

                            if buffer_id is not None and buffer_id not in displayed_tool_ids:
                                displayed_tool_ids.add(buffer_id)
                                file_op_tracker.start_operation(buffer_name, parsed_args, buffer_id)

                                # Mount tool call message
                                tool_msg = ToolCallMessage(buffer_name, parsed_args)
                                await adapter._mount_message(tool_msg)
                                adapter._current_tool_messages[buffer_id] = tool_msg

                                tool_call_buffers.pop(buffer_key, None)
                                display_str = format_tool_display(buffer_name, parsed_args)
                                adapter._update_status(f"Executing {display_str}...")

                    if getattr(message, "chunk_position", None) == "last":
                        pending_text = pending_text_by_namespace.get(ns_key, "")
                        if pending_text:
                            await _flush_assistant_text_ns(
                                adapter, pending_text, ns_key, assistant_message_by_namespace
                            )
                            pending_text_by_namespace[ns_key] = ""
                            assistant_message_by_namespace.pop(ns_key, None)

            # Flush any remaining text from all namespaces
            for ns_key, pending_text in list(pending_text_by_namespace.items()):
                if pending_text:
                    await _flush_assistant_text_ns(
                        adapter, pending_text, ns_key, assistant_message_by_namespace
                    )
            pending_text_by_namespace.clear()
            assistant_message_by_namespace.clear()

            # Handle HITL after stream completes
            if interrupt_occurred:
                any_rejected = False

                for interrupt_id, hitl_request in pending_interrupts.items():
                    if session_state.auto_approve:
                        # Auto-approve silently (user sees tool calls already)
                        decisions = [{"type": "approve"} for _ in hitl_request["action_requests"]]
                        hitl_response[interrupt_id] = {"decisions": decisions}
                    else:
                        # Request approval via adapter
                        decisions = []

                        def mark_hitl_approved(action_request: ActionRequest) -> None:
                            # Record approval only for file-mutating tools.
                            tool_name = action_request.get("name")
                            if tool_name not in {"write_file", "edit_file"}:
                                return
                            args = action_request.get("args", {})
                            if isinstance(args, dict):
                                file_op_tracker.mark_hitl_approved(tool_name, args)

                        for action_request in hitl_request["action_requests"]:
                            future = await adapter._request_approval(action_request, assistant_id)
                            decision = await future

                            # Check for auto-approve-all
                            if (
                                isinstance(decision, dict)
                                and decision.get("type") == "auto_approve_all"
                            ):
                                session_state.auto_approve = True
                                if adapter._on_auto_approve_enabled:
                                    adapter._on_auto_approve_enabled()
                                decisions.append({"type": "approve"})
                                mark_hitl_approved(action_request)
                                # Approve remaining actions
                                for _ in hitl_request["action_requests"][len(decisions) :]:
                                    decisions.append({"type": "approve"})
                                break

                            decisions.append(decision)
                            # Try multiple keys for tool call id
                            tool_id = (
                                action_request.get("id")
                                or action_request.get("tool_call_id")
                                or action_request.get("call_id")
                            )
                            tool_name = action_request.get("name", "")

                            # Find matching tool message - by id or by name as fallback
                            tool_msg = None
                            tool_msg_key = None  # Track key for cleanup
                            if tool_id and tool_id in adapter._current_tool_messages:
                                tool_msg = adapter._current_tool_messages[tool_id]
                                tool_msg_key = tool_id
                            elif tool_name:
                                # Fallback: find last tool message with matching name
                                for key, msg in reversed(
                                    list(adapter._current_tool_messages.items())
                                ):
                                    if msg._tool_name == tool_name:
                                        tool_msg = msg
                                        tool_msg_key = key
                                        break

                            if isinstance(decision, dict) and decision.get("type") == "approve":
                                mark_hitl_approved(action_request)
                                # Don't call set_success here - wait for actual tool output
                                # The ToolMessage handler will update with real results
                            elif isinstance(decision, dict) and decision.get("type") == "reject":
                                if tool_msg:
                                    tool_msg.set_rejected()
                                # Only remove from tracking on reject (approved tools need output update)
                                if tool_msg_key and tool_msg_key in adapter._current_tool_messages:
                                    del adapter._current_tool_messages[tool_msg_key]

                        if any(d.get("type") == "reject" for d in decisions):
                            any_rejected = True

                        hitl_response[interrupt_id] = {"decisions": decisions}

                suppress_resumed_output = any_rejected

            if interrupt_occurred and hitl_response:
                if suppress_resumed_output:
                    await adapter._mount_message(
                        SystemMessage("Command rejected. Tell the agent what you'd like instead.")
                    )
                    return

                stream_input = Command(resume=hitl_response)
            else:
                break

    except (asyncio.CancelledError, KeyboardInterrupt):
        # Single handler for both cancellation paths (they previously had two
        # duplicated, near-identical bodies).
        adapter._update_status("Interrupted")

        # Mark any pending tools as rejected
        for tool_msg in list(adapter._current_tool_messages.values()):
            tool_msg.set_rejected()
        adapter._current_tool_messages.clear()

        await adapter._mount_message(SystemMessage("Interrupted by user"))

        # Append cancellation message to agent state so LLM knows what happened
        # This preserves context rather than rolling back
        try:
            cancellation_msg = HumanMessage(
                content="[SYSTEM] Task interrupted by user. Previous operation was cancelled."
            )
            await agent.aupdate_state(config, {"messages": [cancellation_msg]})
        except Exception:  # noqa: S110
            pass  # State update is best-effort
        return

    adapter._update_status("Ready")

    # Update token tracker
    if adapter._token_tracker and (captured_input_tokens or captured_output_tokens):
        adapter._token_tracker.add(captured_input_tokens, captured_output_tokens)
|
||||
|
||||
|
||||
async def _flush_assistant_text_ns(
|
||||
adapter: TextualUIAdapter,
|
||||
text: str,
|
||||
ns_key: tuple,
|
||||
assistant_message_by_namespace: dict[tuple, Any],
|
||||
) -> None:
|
||||
"""Flush accumulated assistant text for a specific namespace.
|
||||
|
||||
Finalizes the streaming by stopping the MarkdownStream.
|
||||
If no message exists yet, creates one with the full content.
|
||||
"""
|
||||
if not text.strip():
|
||||
return
|
||||
|
||||
current_msg = assistant_message_by_namespace.get(ns_key)
|
||||
if current_msg is None:
|
||||
# No message was created during streaming - create one with full content
|
||||
current_msg = AssistantMessage(text)
|
||||
await adapter._mount_message(current_msg)
|
||||
await current_msg.write_initial_content()
|
||||
assistant_message_by_namespace[ns_key] = current_msg
|
||||
else:
|
||||
# Stop the stream to finalize the content
|
||||
await current_msg.stop_stream()
|
||||
@@ -1,116 +0,0 @@
|
||||
"""Utilities for accurate token counting using LangChain models."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from langchain_core.messages import SystemMessage
|
||||
|
||||
from deepagents_cli.config import console, settings
|
||||
|
||||
|
||||
def calculate_baseline_tokens(model, agent_dir: Path, system_prompt: str, assistant_id: str) -> int:
|
||||
"""Calculate baseline context tokens using the model's official tokenizer.
|
||||
|
||||
This uses the model's get_num_tokens_from_messages() method to get
|
||||
accurate token counts for the initial context (system prompt + agent.md).
|
||||
|
||||
Note: Tool definitions cannot be accurately counted before the first API call
|
||||
due to LangChain limitations. They will be included in the total after the
|
||||
first message is sent (~5,000 tokens).
|
||||
|
||||
Args:
|
||||
model: LangChain model instance (ChatAnthropic or ChatOpenAI)
|
||||
agent_dir: Path to agent directory containing agent.md
|
||||
system_prompt: The base system prompt string
|
||||
assistant_id: The agent identifier for path references
|
||||
|
||||
Returns:
|
||||
Token count for system prompt + agent.md (tools not included)
|
||||
"""
|
||||
# Load user agent.md content
|
||||
agent_md_path = agent_dir / "agent.md"
|
||||
user_memory = ""
|
||||
if agent_md_path.exists():
|
||||
user_memory = agent_md_path.read_text()
|
||||
|
||||
# Load project agent.md content
|
||||
from .config import _find_project_agent_md, _find_project_root
|
||||
|
||||
project_memory = ""
|
||||
project_root = _find_project_root()
|
||||
if project_root:
|
||||
project_md_paths = _find_project_agent_md(project_root)
|
||||
if project_md_paths:
|
||||
try:
|
||||
# Combine all project agent.md files (if multiple exist)
|
||||
contents = []
|
||||
for path in project_md_paths:
|
||||
contents.append(path.read_text())
|
||||
project_memory = "\n\n".join(contents)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Build the complete system prompt as it will be sent
|
||||
# This mimics what AgentMemoryMiddleware.wrap_model_call() does
|
||||
memory_section = (
|
||||
f"<user_memory>\n{user_memory or '(No user agent.md)'}\n</user_memory>\n\n"
|
||||
f"<project_memory>\n{project_memory or '(No project agent.md)'}\n</project_memory>"
|
||||
)
|
||||
|
||||
# Get the long-term memory system prompt
|
||||
memory_system_prompt = get_memory_system_prompt(
|
||||
assistant_id, project_root, bool(project_memory)
|
||||
)
|
||||
|
||||
# Combine all parts in the same order as the middleware
|
||||
full_system_prompt = memory_section + "\n\n" + system_prompt + "\n\n" + memory_system_prompt
|
||||
|
||||
# Count tokens using the model's official method
|
||||
messages = [SystemMessage(content=full_system_prompt)]
|
||||
|
||||
try:
|
||||
# Note: tools parameter is not supported by LangChain's token counting
|
||||
# Tool tokens will be included in the API response after first message
|
||||
return model.get_num_tokens_from_messages(messages)
|
||||
except Exception as e:
|
||||
# Fallback if token counting fails
|
||||
console.print(f"[yellow]Warning: Could not calculate baseline tokens: {e}[/yellow]")
|
||||
return 0
|
||||
|
||||
|
||||
def get_memory_system_prompt(
|
||||
assistant_id: str, project_root: Path | None = None, has_project_memory: bool = False
|
||||
) -> str:
|
||||
"""Get the long-term memory system prompt text.
|
||||
|
||||
Args:
|
||||
assistant_id: The agent identifier for path references
|
||||
project_root: Path to the detected project root (if any)
|
||||
has_project_memory: Whether project memory was loaded
|
||||
"""
|
||||
# Import from agent_memory middleware
|
||||
from .agent_memory import LONGTERM_MEMORY_SYSTEM_PROMPT
|
||||
|
||||
agent_dir = settings.get_agent_dir(assistant_id)
|
||||
agent_dir_absolute = str(agent_dir)
|
||||
agent_dir_display = f"~/.deepagents/{assistant_id}"
|
||||
|
||||
# Build project memory info
|
||||
if project_root and has_project_memory:
|
||||
project_memory_info = f"`{project_root}` (detected)"
|
||||
elif project_root:
|
||||
project_memory_info = f"`{project_root}` (no agent.md found)"
|
||||
else:
|
||||
project_memory_info = "None (not in a git project)"
|
||||
|
||||
# Build project deepagents directory path
|
||||
if project_root:
|
||||
project_deepagents_dir = f"{project_root}/.deepagents"
|
||||
else:
|
||||
project_deepagents_dir = "[project-root]/.deepagents (not in a project)"
|
||||
|
||||
return LONGTERM_MEMORY_SYSTEM_PROMPT.format(
|
||||
agent_dir_absolute=agent_dir_absolute,
|
||||
agent_dir_display=agent_dir_display,
|
||||
project_memory_info=project_memory_info,
|
||||
project_deepagents_dir=project_deepagents_dir,
|
||||
)
|
||||
@@ -1,10 +1,10 @@
|
||||
"""CLI 에이전트를 위한 사용자 정의 도구."""
|
||||
"""Custom tools for the CLI agent."""
|
||||
|
||||
from typing import Any, Literal
|
||||
|
||||
import requests # type: ignore
|
||||
from markdownify import markdownify # type: ignore
|
||||
from tavily import TavilyClient # type: ignore
|
||||
import requests
|
||||
from markdownify import markdownify
|
||||
from tavily import TavilyClient
|
||||
|
||||
from deepagents_cli.config import settings
|
||||
|
||||
@@ -20,10 +20,10 @@ def http_request(
|
||||
params: dict[str, str] | None = None,
|
||||
timeout: int = 30,
|
||||
) -> dict[str, Any]:
|
||||
"""Sends an HTTP request to an API or web service.
|
||||
"""Make HTTP requests to APIs and web services.
|
||||
|
||||
Args:
|
||||
url: The URL to target
|
||||
url: Target URL
|
||||
method: HTTP method (GET, POST, PUT, DELETE, etc.)
|
||||
headers: HTTP headers to include
|
||||
data: Request body data (string or dict)
|
||||
@@ -31,7 +31,7 @@ def http_request(
|
||||
timeout: Request timeout in seconds
|
||||
|
||||
Returns:
|
||||
Dictionary containing status_code, headers, and content
|
||||
Dictionary with response data including status, headers, and content
|
||||
"""
|
||||
try:
|
||||
kwargs = {"url": url, "method": method.upper(), "timeout": timeout}
|
||||
@@ -66,7 +66,7 @@ def http_request(
|
||||
"success": False,
|
||||
"status_code": 0,
|
||||
"headers": {},
|
||||
"content": f"{timeout}초 후 요청 시간이 초과되었습니다",
|
||||
"content": f"Request timed out after {timeout} seconds",
|
||||
"url": url,
|
||||
}
|
||||
except requests.exceptions.RequestException as e:
|
||||
@@ -74,7 +74,7 @@ def http_request(
|
||||
"success": False,
|
||||
"status_code": 0,
|
||||
"headers": {},
|
||||
"content": f"요청 오류: {e!s}",
|
||||
"content": f"Request error: {e!s}",
|
||||
"url": url,
|
||||
}
|
||||
except Exception as e:
|
||||
@@ -82,7 +82,7 @@ def http_request(
|
||||
"success": False,
|
||||
"status_code": 0,
|
||||
"headers": {},
|
||||
"content": f"요청 생성 오류: {e!s}",
|
||||
"content": f"Error making request: {e!s}",
|
||||
"url": url,
|
||||
}
|
||||
|
||||
@@ -93,36 +93,36 @@ def web_search(
|
||||
topic: Literal["general", "news", "finance"] = "general",
|
||||
include_raw_content: bool = False,
|
||||
):
|
||||
"""Performs a web search using Tavily for current information and documents.
|
||||
"""Search the web using Tavily for current information and documentation.
|
||||
|
||||
This tool searches the web and returns relevant results. After receiving results,
|
||||
you should synthesize the information into a natural response that helps the user.
|
||||
you MUST synthesize the information into a natural, helpful response for the user.
|
||||
|
||||
Args:
|
||||
query: The search query (specific and detailed)
|
||||
query: The search query (be specific and detailed)
|
||||
max_results: Number of results to return (default: 5)
|
||||
topic: The topic type of the search - "general" for most queries, "news" for current events
|
||||
include_raw_content: Include full page content (Warning: uses more tokens)
|
||||
topic: Search topic type - "general" for most queries, "news" for current events
|
||||
include_raw_content: Include full page content (warning: uses more tokens)
|
||||
|
||||
Returns:
|
||||
Dictionary containing:
|
||||
- results: List of search results, each containing:
|
||||
- results: List of search results, each with:
|
||||
- title: Page title
|
||||
- url: Page URL
|
||||
- content: Relevant snippet from the page
|
||||
- content: Relevant excerpt from the page
|
||||
- score: Relevance score (0-1)
|
||||
- query: Original search query
|
||||
- query: The original search query
|
||||
|
||||
IMPORTANT: After using this tool:
|
||||
1. Read the 'content' field of each result
|
||||
1. Read through the 'content' field of each result
|
||||
2. Extract relevant information that answers the user's question
|
||||
3. Synthesize this into a clear, natural language response
|
||||
4. Cite sources by mentioning the page title or URL
|
||||
5. Do NOT show raw JSON to the user - always provide a formatted response
|
||||
4. Cite sources by mentioning the page titles or URLs
|
||||
5. NEVER show the raw JSON to the user - always provide a formatted response
|
||||
"""
|
||||
if tavily_client is None:
|
||||
return {
|
||||
"error": "Tavily API 키가 구성되지 않았습니다. TAVILY_API_KEY 환경 변수를 설정하십시오.",
|
||||
"error": "Tavily API key not configured. Please set TAVILY_API_KEY environment variable.",
|
||||
"query": query,
|
||||
}
|
||||
|
||||
@@ -134,15 +134,15 @@ def web_search(
|
||||
topic=topic,
|
||||
)
|
||||
except Exception as e:
|
||||
return {"error": f"웹 검색 오류: {e!s}", "query": query}
|
||||
return {"error": f"Web search error: {e!s}", "query": query}
|
||||
|
||||
|
||||
def fetch_url(url: str, timeout: int = 30) -> dict[str, Any]:
|
||||
"""Fetches content from a URL and converts HTML to markdown format.
|
||||
"""Fetch content from a URL and convert HTML to markdown format.
|
||||
|
||||
This tool fetches web page content and converts it to clean markdown text,
|
||||
making it easier to read and process HTML content. After receiving markdown,
|
||||
you should synthesize the information into a natural response that helps the user.
|
||||
making it easy to read and process HTML content. After receiving the markdown,
|
||||
you MUST synthesize the information into a natural, helpful response for the user.
|
||||
|
||||
Args:
|
||||
url: The URL to fetch (must be a valid HTTP/HTTPS URL)
|
||||
@@ -150,17 +150,17 @@ def fetch_url(url: str, timeout: int = 30) -> dict[str, Any]:
|
||||
|
||||
Returns:
|
||||
Dictionary containing:
|
||||
- success: Whether the request was successful
|
||||
- url: Final URL after redirects
|
||||
- success: Whether the request succeeded
|
||||
- url: The final URL after redirects
|
||||
- markdown_content: The page content converted to markdown
|
||||
- status_code: HTTP status code
|
||||
- content_length: Length of markdown content (in characters)
|
||||
- content_length: Length of the markdown content in characters
|
||||
|
||||
IMPORTANT: After using this tool:
|
||||
1. Read the markdown_content
|
||||
1. Read through the markdown content
|
||||
2. Extract relevant information that answers the user's question
|
||||
3. Synthesize this into a clear, natural language response
|
||||
4. Do NOT show raw markdown to the user unless specifically requested
|
||||
4. NEVER show the raw markdown to the user unless specifically requested
|
||||
"""
|
||||
try:
|
||||
response = requests.get(
|
||||
@@ -180,4 +180,4 @@ def fetch_url(url: str, timeout: int = 30) -> dict[str, Any]:
|
||||
"content_length": len(markdown_content),
|
||||
}
|
||||
except Exception as e:
|
||||
return {"error": f"URL 가져오기 오류: {e!s}", "url": url}
|
||||
return {"error": f"Fetch URL error: {e!s}", "url": url}
|
||||
|
||||
@@ -1,197 +1,151 @@
|
||||
"""CLI를 위한 UI 렌더링 및 디스플레이 유틸리티."""
|
||||
"""UI rendering and display utilities for the CLI."""
|
||||
|
||||
import json
|
||||
import re
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from rich import box
|
||||
from rich.markup import escape
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
|
||||
from .config import COLORS, COMMANDS, DEEP_AGENTS_ASCII, MAX_ARG_LENGTH, console
|
||||
from .file_ops import FileOperationRecord
|
||||
from .config import COLORS, DEEP_AGENTS_ASCII, MAX_ARG_LENGTH, console
|
||||
|
||||
|
||||
def truncate_value(value: str, max_length: int = MAX_ARG_LENGTH) -> str:
|
||||
"""max_length를 초과하는 경우 문자열 값을 자릅니다."""
|
||||
"""Truncate a string value if it exceeds max_length."""
|
||||
if len(value) > max_length:
|
||||
return value[:max_length] + "..."
|
||||
return value
|
||||
|
||||
|
||||
def format_tool_display(tool_name: str, tool_args: dict) -> str:
|
||||
"""도구 호출을 도구별 스마트 포맷팅으로 표시합니다.
|
||||
"""Format tool calls for display with tool-specific smart formatting.
|
||||
|
||||
모든 인수보다는 각 도구 유형에 가장 관련성 높은 정보를 표시합니다.
|
||||
Shows the most relevant information for each tool type rather than all arguments.
|
||||
|
||||
Args:
|
||||
tool_name: 호출되는 도구의 이름
|
||||
tool_args: 도구 인수 딕셔너리
|
||||
tool_name: Name of the tool being called
|
||||
tool_args: Dictionary of tool arguments
|
||||
|
||||
Returns:
|
||||
표시용으로 포맷팅된 문자열 (예: "read_file(config.py)")
|
||||
Formatted string for display (e.g., "read_file(config.py)")
|
||||
|
||||
Examples:
|
||||
read_file(path="/long/path/file.py") → "read_file(file.py)"
|
||||
web_search(query="how to code", max_results=5) → 'web_search("how to code")'
|
||||
shell(command="pip install foo") → 'shell("pip install foo")'
|
||||
"""
|
||||
|
||||
def abbreviate_path(path_str: str, max_length: int = 60) -> str:
|
||||
"""Abbreviate a file path intelligently - show basename or relative path."""
|
||||
try:
|
||||
path = Path(path_str)
|
||||
|
||||
# If it's just a filename (no directory parts), return as-is
|
||||
if len(path.parts) == 1:
|
||||
return path_str
|
||||
|
||||
# Try to get relative path from current working directory
|
||||
try:
|
||||
rel_path = path.relative_to(Path.cwd())
|
||||
rel_str = str(rel_path)
|
||||
# Use relative if it's shorter and not too long
|
||||
if len(rel_str) < len(path_str) and len(rel_str) <= max_length:
|
||||
return rel_str
|
||||
except (ValueError, Exception):
|
||||
pass
|
||||
|
||||
# If absolute path is reasonable length, use it
|
||||
if len(path_str) <= max_length:
|
||||
return path_str
|
||||
|
||||
# Otherwise, just show basename (filename only)
|
||||
return path.name
|
||||
except Exception:
|
||||
# Fallback to original string if any error
|
||||
return truncate_value(path_str, max_length)
|
||||
|
||||
# Tool-specific formatting - show the most important argument(s)
|
||||
if tool_name in ("read_file", "write_file", "edit_file"):
|
||||
return _format_file_tool(tool_name, tool_args)
|
||||
# File operations: show the primary file path argument (file_path or path)
|
||||
path_value = tool_args.get("file_path")
|
||||
if path_value is None:
|
||||
path_value = tool_args.get("path")
|
||||
if path_value is not None:
|
||||
path = abbreviate_path(str(path_value))
|
||||
return f"{tool_name}({path})"
|
||||
|
||||
if tool_name == "web_search":
|
||||
return _format_web_search_tool(tool_name, tool_args)
|
||||
elif tool_name == "web_search":
|
||||
# Web search: show the query string
|
||||
if "query" in tool_args:
|
||||
query = str(tool_args["query"])
|
||||
query = truncate_value(query, 100)
|
||||
return f'{tool_name}("{query}")'
|
||||
|
||||
if tool_name == "grep":
|
||||
return _format_grep_tool(tool_name, tool_args)
|
||||
elif tool_name == "grep":
|
||||
# Grep: show the search pattern
|
||||
if "pattern" in tool_args:
|
||||
pattern = str(tool_args["pattern"])
|
||||
pattern = truncate_value(pattern, 70)
|
||||
return f'{tool_name}("{pattern}")'
|
||||
|
||||
if tool_name == "shell":
|
||||
return _format_shell_tool(tool_name, tool_args)
|
||||
elif tool_name == "shell":
|
||||
# Shell: show the command being executed
|
||||
if "command" in tool_args:
|
||||
command = str(tool_args["command"])
|
||||
command = truncate_value(command, 120)
|
||||
return f'{tool_name}("{command}")'
|
||||
|
||||
if tool_name == "ls":
|
||||
return _format_ls_tool(tool_name, tool_args)
|
||||
elif tool_name == "ls":
|
||||
# ls: show directory, or empty if current directory
|
||||
if tool_args.get("path"):
|
||||
path = abbreviate_path(str(tool_args["path"]))
|
||||
return f"{tool_name}({path})"
|
||||
return f"{tool_name}()"
|
||||
|
||||
if tool_name == "glob":
|
||||
return _format_glob_tool(tool_name, tool_args)
|
||||
elif tool_name == "glob":
|
||||
# Glob: show the pattern
|
||||
if "pattern" in tool_args:
|
||||
pattern = str(tool_args["pattern"])
|
||||
pattern = truncate_value(pattern, 80)
|
||||
return f'{tool_name}("{pattern}")'
|
||||
|
||||
if tool_name == "http_request":
|
||||
return _format_http_request_tool(tool_name, tool_args)
|
||||
elif tool_name == "http_request":
|
||||
# HTTP: show method and URL
|
||||
parts = []
|
||||
if "method" in tool_args:
|
||||
parts.append(str(tool_args["method"]).upper())
|
||||
if "url" in tool_args:
|
||||
url = str(tool_args["url"])
|
||||
url = truncate_value(url, 80)
|
||||
parts.append(url)
|
||||
if parts:
|
||||
return f"{tool_name}({' '.join(parts)})"
|
||||
|
||||
if tool_name == "fetch_url":
|
||||
return _format_fetch_url_tool(tool_name, tool_args)
|
||||
elif tool_name == "fetch_url":
|
||||
# Fetch URL: show the URL being fetched
|
||||
if "url" in tool_args:
|
||||
url = str(tool_args["url"])
|
||||
url = truncate_value(url, 80)
|
||||
return f'{tool_name}("{url}")'
|
||||
|
||||
if tool_name == "task":
|
||||
return _format_task_tool(tool_name, tool_args)
|
||||
elif tool_name == "task":
|
||||
# Task: show the task description
|
||||
if "description" in tool_args:
|
||||
desc = str(tool_args["description"])
|
||||
desc = truncate_value(desc, 100)
|
||||
return f'{tool_name}("{desc}")'
|
||||
|
||||
if tool_name == "write_todos":
|
||||
return _format_write_todos_tool(tool_name, tool_args)
|
||||
elif tool_name == "write_todos":
|
||||
# Todos: show count of items
|
||||
if "todos" in tool_args and isinstance(tool_args["todos"], list):
|
||||
count = len(tool_args["todos"])
|
||||
return f"{tool_name}({count} items)"
|
||||
|
||||
# Fallback: generic formatting
|
||||
arg_str = ", ".join(f"{k}={truncate_value(str(v), 20)}" for k, v in tool_args.items())
|
||||
return f"{tool_name}({arg_str})"
|
||||
|
||||
|
||||
def _abbreviate_path(path_str: str, max_length: int = 60) -> str:
|
||||
"""파일 경로를 지능적으로 축약합니다 - 베이스네임 또는 상대 경로를 표시합니다."""
|
||||
try:
|
||||
path = Path(path_str)
|
||||
|
||||
# If it's just a filename (no directory parts), return as-is
|
||||
if len(path.parts) == 1:
|
||||
return path_str
|
||||
|
||||
# Try to get relative path from current working directory
|
||||
try:
|
||||
rel_path = path.relative_to(Path.cwd())
|
||||
rel_str = str(rel_path)
|
||||
# Use relative if it's shorter and not too long
|
||||
if len(rel_str) < len(path_str) and len(rel_str) <= max_length:
|
||||
return rel_str
|
||||
except (ValueError, Exception):
|
||||
pass
|
||||
|
||||
# If absolute path is reasonable length, use it
|
||||
if len(path_str) <= max_length:
|
||||
return path_str
|
||||
|
||||
# Otherwise, just show basename (filename only)
|
||||
return path.name
|
||||
except Exception:
|
||||
# Fallback to original string if any error
|
||||
return truncate_value(path_str, max_length)
|
||||
|
||||
|
||||
def _format_file_tool(tool_name: str, tool_args: dict) -> str:
|
||||
path_value = tool_args.get("file_path")
|
||||
if path_value is None:
|
||||
path_value = tool_args.get("path")
|
||||
if path_value is not None:
|
||||
path = _abbreviate_path(str(path_value))
|
||||
return f"{tool_name}({path})"
|
||||
return f"{tool_name}(...)"
|
||||
|
||||
|
||||
def _format_web_search_tool(tool_name: str, tool_args: dict) -> str:
|
||||
if "query" in tool_args:
|
||||
query = str(tool_args["query"])
|
||||
query = truncate_value(query, 100)
|
||||
return f'{tool_name}("{query}")'
|
||||
return f"{tool_name}()"
|
||||
|
||||
|
||||
def _format_grep_tool(tool_name: str, tool_args: dict) -> str:
|
||||
if "pattern" in tool_args:
|
||||
pattern = str(tool_args["pattern"])
|
||||
pattern = truncate_value(pattern, 70)
|
||||
return f'{tool_name}("{pattern}")'
|
||||
return f"{tool_name}()"
|
||||
|
||||
|
||||
def _format_shell_tool(tool_name: str, tool_args: dict) -> str:
|
||||
if "command" in tool_args:
|
||||
command = str(tool_args["command"])
|
||||
command = truncate_value(command, 120)
|
||||
return f'{tool_name}("{command}")'
|
||||
return f"{tool_name}()"
|
||||
|
||||
|
||||
def _format_ls_tool(tool_name: str, tool_args: dict) -> str:
|
||||
if tool_args.get("path"):
|
||||
path = _abbreviate_path(str(tool_args["path"]))
|
||||
return f"{tool_name}({path})"
|
||||
return f"{tool_name}()"
|
||||
|
||||
|
||||
def _format_glob_tool(tool_name: str, tool_args: dict) -> str:
|
||||
if "pattern" in tool_args:
|
||||
pattern = str(tool_args["pattern"])
|
||||
pattern = truncate_value(pattern, 80)
|
||||
return f'{tool_name}("{pattern}")'
|
||||
return f"{tool_name}()"
|
||||
|
||||
|
||||
def _format_http_request_tool(tool_name: str, tool_args: dict) -> str:
|
||||
parts = []
|
||||
if "method" in tool_args:
|
||||
parts.append(str(tool_args["method"]).upper())
|
||||
if "url" in tool_args:
|
||||
url = str(tool_args["url"])
|
||||
url = truncate_value(url, 80)
|
||||
parts.append(url)
|
||||
if parts:
|
||||
return f"{tool_name}({' '.join(parts)})"
|
||||
return f"{tool_name}()"
|
||||
|
||||
|
||||
def _format_fetch_url_tool(tool_name: str, tool_args: dict) -> str:
|
||||
if "url" in tool_args:
|
||||
url = str(tool_args["url"])
|
||||
url = truncate_value(url, 80)
|
||||
return f'{tool_name}("{url}")'
|
||||
return f"{tool_name}()"
|
||||
|
||||
|
||||
def _format_task_tool(tool_name: str, tool_args: dict) -> str:
|
||||
if "description" in tool_args:
|
||||
desc = str(tool_args["description"])
|
||||
desc = truncate_value(desc, 100)
|
||||
return f'{tool_name}("{desc}")'
|
||||
return f"{tool_name}()"
|
||||
|
||||
|
||||
def _format_write_todos_tool(tool_name: str, tool_args: dict) -> str:
|
||||
if "todos" in tool_args and isinstance(tool_args["todos"], list):
|
||||
count = len(tool_args["todos"])
|
||||
return f"{tool_name}({count} items)"
|
||||
return f"{tool_name}()"
|
||||
# Fallback: generic formatting for unknown tools
|
||||
# Show all arguments in key=value format
|
||||
args_str = ", ".join(f"{k}={truncate_value(str(v), 50)}" for k, v in tool_args.items())
|
||||
return f"{tool_name}({args_str})"
|
||||
|
||||
|
||||
def format_tool_message_content(content: Any) -> str:
|
||||
"""ToolMessage 내용을 출력 가능한 문자열로 변환합니다."""
|
||||
"""Convert ToolMessage content into a printable string."""
|
||||
if content is None:
|
||||
return ""
|
||||
if isinstance(content, list):
|
||||
@@ -208,437 +162,81 @@ def format_tool_message_content(content: Any) -> str:
|
||||
return str(content)
|
||||
|
||||
|
||||
class TokenTracker:
|
||||
"""대화 전반에 걸친 토큰 사용량을 추적합니다."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.baseline_context = 0 # Baseline system context (system + agent.md + tools)
|
||||
self.current_context = 0 # Total context including messages
|
||||
self.last_output = 0
|
||||
|
||||
def set_baseline(self, tokens: int) -> None:
|
||||
"""기준 컨텍스트 토큰 수를 설정합니다.
|
||||
|
||||
Args:
|
||||
tokens: 기준 토큰 수 (시스템 프롬프트 + agent.md + 도구)
|
||||
"""
|
||||
self.baseline_context = tokens
|
||||
self.current_context = tokens
|
||||
|
||||
def reset(self) -> None:
|
||||
"""기준으로 재설정합니다 (/clear 명령용)."""
|
||||
self.current_context = self.baseline_context
|
||||
self.last_output = 0
|
||||
|
||||
def add(self, input_tokens: int, output_tokens: int) -> None:
|
||||
"""응답에서 토큰을 추가합니다."""
|
||||
# input_tokens IS the current context size (what was sent to the model)
|
||||
self.current_context = input_tokens
|
||||
self.last_output = output_tokens
|
||||
|
||||
def display_last(self) -> None:
|
||||
"""이번 턴 이후의 현재 컨텍스트 크기를 표시합니다."""
|
||||
if self.last_output and self.last_output >= 1000:
|
||||
console.print(f" 생성됨: {self.last_output:,} 토큰", style="dim")
|
||||
if self.current_context:
|
||||
console.print(f" 현재 컨텍스트: {self.current_context:,} 토큰", style="dim")
|
||||
|
||||
def display_session(self) -> None:
|
||||
"""현재 컨텍스트 크기를 표시합니다."""
|
||||
console.print("\n[bold]토큰 사용량:[/bold]", style=COLORS["primary"])
|
||||
|
||||
# Check if we've had any actual API calls yet (current > baseline means we have conversation)
|
||||
has_conversation = self.current_context > self.baseline_context
|
||||
|
||||
if self.baseline_context > 0:
|
||||
console.print(
|
||||
f" 기준(Baseline): {self.baseline_context:,} 토큰 [dim](시스템 + agent.md)[/dim]",
|
||||
style=COLORS["dim"],
|
||||
)
|
||||
|
||||
if not has_conversation:
|
||||
# Before first message - warn that tools aren't counted yet
|
||||
console.print(" [dim]참고: 도구 정의(~5k 토큰)는 첫 번째 메시지 이후에 포함됩니다[/dim]")
|
||||
|
||||
if has_conversation:
|
||||
tools_and_conversation = self.current_context - self.baseline_context
|
||||
console.print(f" 도구 + 대화: {tools_and_conversation:,} 토큰", style=COLORS["dim"])
|
||||
|
||||
console.print(f" 합계: {self.current_context:,} 토큰", style="bold " + COLORS["dim"])
|
||||
console.print()
|
||||
|
||||
|
||||
def render_todo_list(todos: list[dict]) -> None:
|
||||
"""작업 목록을 체크박스가 있는 rich 패널로 렌더링합니다."""
|
||||
if not todos:
|
||||
return
|
||||
|
||||
lines = []
|
||||
for todo in todos:
|
||||
status = todo.get("status", "pending")
|
||||
content = todo.get("content", "")
|
||||
|
||||
if status == "completed":
|
||||
icon = "☑"
|
||||
style = "green"
|
||||
elif status == "in_progress":
|
||||
icon = "⏳"
|
||||
style = "yellow"
|
||||
else: # pending
|
||||
icon = "☐"
|
||||
style = "dim"
|
||||
|
||||
lines.append(f"[{style}]{icon} {content}[/{style}]")
|
||||
|
||||
panel = Panel(
|
||||
"\n".join(lines),
|
||||
title="[bold]작업 목록[/bold]",
|
||||
border_style="cyan",
|
||||
box=box.ROUNDED,
|
||||
padding=(0, 1),
|
||||
)
|
||||
console.print(panel)
|
||||
|
||||
|
||||
def _format_line_span(start: int | None, end: int | None) -> str:
|
||||
if start is None and end is None:
|
||||
return ""
|
||||
if start is not None and end is None:
|
||||
return f"({start}행부터)"
|
||||
if start is None and end is not None:
|
||||
return f"({end}행까지)"
|
||||
if start == end:
|
||||
return f"({start}행)"
|
||||
return f"({start}-{end}행)"
|
||||
|
||||
|
||||
def render_file_operation(record: FileOperationRecord) -> None:
|
||||
"""파일시스템 도구 호출에 대한 간략한 요약을 렌더링합니다."""
|
||||
label_lookup = {
|
||||
"read_file": "읽기",
|
||||
"write_file": "쓰기",
|
||||
"edit_file": "업데이트",
|
||||
}
|
||||
label = label_lookup.get(record.tool_name, record.tool_name)
|
||||
header = Text()
|
||||
header.append("⏺ ", style=COLORS["tool"])
|
||||
header.append(f"{label}({record.display_path})", style=f"bold {COLORS['tool']}")
|
||||
console.print(header)
|
||||
|
||||
def _print_detail(message: str, *, style: str = COLORS["dim"]) -> None:
|
||||
detail = Text()
|
||||
detail.append(" ⎿ ", style=style)
|
||||
detail.append(message, style=style)
|
||||
console.print(detail)
|
||||
|
||||
if record.status == "error":
|
||||
_print_detail(record.error or "파일 작업 실행 오류", style="red")
|
||||
return
|
||||
|
||||
if record.tool_name == "read_file":
|
||||
lines = record.metrics.lines_read
|
||||
span = _format_line_span(record.metrics.start_line, record.metrics.end_line)
|
||||
detail = f"{lines}줄 읽음"
|
||||
if span:
|
||||
detail = f"{detail} {span}"
|
||||
_print_detail(detail)
|
||||
else:
|
||||
if record.tool_name == "write_file":
|
||||
added = record.metrics.lines_added
|
||||
removed = record.metrics.lines_removed
|
||||
lines = record.metrics.lines_written
|
||||
detail = f"{lines}줄 씀"
|
||||
if added or removed:
|
||||
detail = f"{detail} (+{added} / -{removed})"
|
||||
else:
|
||||
added = record.metrics.lines_added
|
||||
removed = record.metrics.lines_removed
|
||||
detail = f"총 {record.metrics.lines_written}줄 편집됨"
|
||||
if added or removed:
|
||||
detail = f"{detail} (+{added} / -{removed})"
|
||||
_print_detail(detail)
|
||||
|
||||
# Skip diff display for HIL-approved operations that succeeded
|
||||
# (user already saw the diff during approval)
|
||||
if record.diff and not (record.hitl_approved and record.status == "success"):
|
||||
render_diff(record)
|
||||
|
||||
|
||||
def render_diff(record: FileOperationRecord) -> None:
|
||||
"""파일 작업에 대한 diff를 렌더링합니다."""
|
||||
if not record.diff:
|
||||
return
|
||||
render_diff_block(record.diff, f"{record.display_path} 차이(Diff)")
|
||||
|
||||
|
||||
def _wrap_diff_line(
|
||||
code: str,
|
||||
marker: str,
|
||||
color: str,
|
||||
line_num: int | None,
|
||||
width: int,
|
||||
term_width: int,
|
||||
) -> list[str]:
|
||||
"""긴 diff 줄을 적절한 들여쓰기로 줄바꿈합니다.
|
||||
|
||||
Args:
|
||||
code: 래핑할 코드 콘텐츠
|
||||
marker: Diff 마커 ('+', '-', ' ')
|
||||
color: 해당 줄의 색상
|
||||
line_num: 표시할 줄 번호 (연속 줄의 경우 None)
|
||||
width: 줄 번호 열의 너비
|
||||
term_width: 터미널 너비
|
||||
|
||||
Returns:
|
||||
포맷팅된 줄 목록 (줄바꿈된 경우 여러 개일 수 있음)
|
||||
"""
|
||||
# Escape Rich markup in code content
|
||||
code = escape(code)
|
||||
|
||||
prefix_len = width + 4 # line_num + space + marker + 2 spaces
|
||||
available_width = term_width - prefix_len
|
||||
|
||||
if len(code) <= available_width:
|
||||
if line_num is not None:
|
||||
return [f"[dim]{line_num:>{width}}[/dim] [{color}]{marker} {code}[/{color}]"]
|
||||
return [f"{' ' * width} [{color}]{marker} {code}[/{color}]"]
|
||||
|
||||
lines = []
|
||||
remaining = code
|
||||
first = True
|
||||
|
||||
while remaining:
|
||||
if len(remaining) <= available_width:
|
||||
chunk = remaining
|
||||
remaining = ""
|
||||
else:
|
||||
# Try to break at a good point (space, comma, etc.)
|
||||
chunk = remaining[:available_width]
|
||||
# Look for a good break point in the last 20 chars
|
||||
break_point = max(
|
||||
chunk.rfind(" "),
|
||||
chunk.rfind(","),
|
||||
chunk.rfind("("),
|
||||
chunk.rfind(")"),
|
||||
)
|
||||
if break_point > available_width - 20:
|
||||
# Found a good break point
|
||||
chunk = remaining[: break_point + 1]
|
||||
remaining = remaining[break_point + 1 :]
|
||||
else:
|
||||
# No good break point, just split
|
||||
chunk = remaining[:available_width]
|
||||
remaining = remaining[available_width:]
|
||||
|
||||
if first and line_num is not None:
|
||||
lines.append(f"[dim]{line_num:>{width}}[/dim] [{color}]{marker} {chunk}[/{color}]")
|
||||
first = False
|
||||
else:
|
||||
lines.append(f"{' ' * width} [{color}]{marker} {chunk}[/{color}]")
|
||||
|
||||
return lines
|
||||
|
||||
|
||||
def format_diff_rich(diff_lines: list[str]) -> str:
    """Format unified-diff lines with line numbers and Rich colors.

    Args:
        diff_lines: Lines of a unified diff.

    Returns:
        A Rich-markup string, one rendered entry per kept diff line.
    """
    if not diff_lines:
        return "[dim]감지된 변경 사항 없음[/dim]"

    # Wrapping is computed against the current terminal width.
    term_width = shutil.get_terminal_size().columns

    hunk_re = re.compile(r"@@ -(\d+)(?:,\d+)? \+(\d+)")

    # Size the line-number gutter from the largest line number announced
    # by any hunk header (minimum 3 columns).
    starts: list[int] = []
    for raw in diff_lines:
        header = hunk_re.match(raw)
        if header:
            starts.extend((int(header.group(1)), int(header.group(2))))
    width = max(3, len(str(max(starts, default=0))))

    output: list[str] = []
    old_num = new_num = 0

    # White text on dark backgrounds keeps additions/deletions readable.
    color_add = "white on dark_green"
    color_del = "white on dark_red"
    color_ctx = "dim"

    for raw in diff_lines:
        header = hunk_re.match(raw)
        if raw.strip() == "...":
            output.append(f"[{color_ctx}]...[/{color_ctx}]")
        elif raw.startswith(("---", "+++")):
            # File-name header lines carry no content worth rendering.
            continue
        elif header:
            old_num = int(header.group(1))
            new_num = int(header.group(2))
        elif raw.startswith("-"):
            output.extend(_wrap_diff_line(raw[1:], "-", color_del, old_num, width, term_width))
            old_num += 1
        elif raw.startswith("+"):
            output.extend(_wrap_diff_line(raw[1:], "+", color_add, new_num, width, term_width))
            new_num += 1
        elif raw.startswith(" "):
            # Context lines advance both counters.
            output.extend(_wrap_diff_line(raw[1:], " ", color_ctx, old_num, width, term_width))
            old_num += 1
            new_num += 1

    return "\n".join(output)
|
||||
|
||||
|
||||
def render_diff_block(diff: str, title: str) -> None:
    """Render a diff string with line numbers and colors."""
    try:
        # Number and colorize every line of the unified diff.
        rendered = format_diff_rich(diff.splitlines())

        console.print()
        console.print(f"[bold {COLORS['primary']}]═══ {title} ═══[/bold {COLORS['primary']}]")
        console.print(rendered)
        console.print()
    except (ValueError, AttributeError, IndexError, OSError):
        # Formatting failed — fall back to printing the raw diff text.
        console.print()
        console.print(f"[bold {COLORS['primary']}]{title}[/bold {COLORS['primary']}]")
        console.print(diff)
        console.print()
|
||||
|
||||
|
||||
def show_interactive_help() -> None:
    """Print the commands and editing features available in a session."""
    primary = COLORS["primary"]
    dim = COLORS["dim"]

    console.print()
    console.print()
    console.print("[bold]대화형 명령:[/bold]", style=primary)
    console.print()

    for cmd, desc in COMMANDS.items():
        console.print(f" /{cmd:<12} {desc}", style=dim)

    console.print()
    console.print("[bold]편집 기능:[/bold]", style=primary)
    editing_lines = (
        " Enter 메시지 제출",
        " Alt+Enter 줄바꿈 삽입 (Mac의 경우 Option+Enter, 또는 ESC 후 Enter)",
        " Ctrl+E 외부 편집기에서 열기 (기본값 nano)",
        " Ctrl+T 자동 승인 모드 전환",
        " 방향키 입력 탐색",
        " Ctrl+C 입력 취소 또는 작업 중인 에이전트 중단",
    )
    for line in editing_lines:
        console.print(line, style=dim)

    console.print()
    console.print("[bold]특수 기능:[/bold]", style=primary)
    special_lines = (
        " @filename @를 입력하여 파일 자동 완성 및 콘텐츠 주입",
        " /command /를 입력하여 사용 가능한 명령 확인",
        " !command !를 입력하여 bash 명령 실행 (예: !ls, !git status)",
        " 입력하면 완성이 자동으로 나타납니다",
    )
    for line in special_lines:
        console.print(line, style=dim)

    console.print()
    console.print("[bold]자동 승인 모드:[/bold]", style=primary)
    auto_approve_lines = (
        " Ctrl+T 자동 승인 모드 전환",
        " --auto-approve 자동 승인이 활성화된 상태로 CLI 시작 (명령줄을 통해)",
        " 활성화되면 도구 작업이 확인 프롬프트 없이 실행됩니다",
    )
    for line in auto_approve_lines:
        console.print(line, style=dim)

    console.print()
|
||||
|
||||
|
||||
def show_help() -> None:
    """Display CLI usage, options, examples, and interactive features.

    The help text mixes Korean and English sections in the order they were
    originally authored; every user-facing string is emitted verbatim.

    Fixes vs. the previous revision:
    - Removed a second, bare-string "docstring" which was a dead statement.
    - One console.print call had been merge-mangled into passing a
      positional argument after ``style=`` (a SyntaxError) with a duplicate
      ``style`` keyword; it is split into two valid calls below.
    """
    console.print()
    console.print(DEEP_AGENTS_ASCII, style=f"bold {COLORS['primary']}")
    console.print()

    console.print("[bold]사용법:[/bold]", style=COLORS["primary"])
    console.print(" deepagents [OPTIONS] 대화형 세션 시작")
    console.print(" deepagents list 사용 가능한 모든 에이전트 나열")
    console.print(" deepagents reset --agent AGENT 에이전트를 기본 프롬프트로 초기화")
    console.print(" deepagents reset --agent AGENT --target SOURCE 에이전트를 다른 에이전트의 복사본으로 초기화")
    console.print(" deepagents help 이 도움말 메시지 표시")
    console.print("[bold]Usage:[/bold]", style=COLORS["primary"])
    console.print(" deepagents [OPTIONS] Start interactive session")
    console.print(" deepagents list List all available agents")
    console.print(" deepagents reset --agent AGENT Reset agent to default prompt")
    console.print(
        " deepagents reset --agent AGENT --target SOURCE Reset agent to copy of another agent"
    )
    console.print(" deepagents help Show this help message")
    console.print()

    console.print("[bold]옵션:[/bold]", style=COLORS["primary"])
    console.print(" --agent NAME 에이전트 식별자 (기본값: agent)")
    console.print(" --model MODEL 사용할 모델 (예: claude-sonnet-4-5-20250929, gpt-4o)")
    console.print(" --auto-approve 프롬프트 없이 도구 사용 자동 승인")
    console.print(" --sandbox TYPE 실행을 위한 원격 샌드박스 (modal, runloop, daytona)")
    console.print(" --sandbox-id ID 기존 샌드박스 재사용 (생성/정리 건너뜀)")
    console.print("[bold]Options:[/bold]", style=COLORS["primary"])
    console.print(" --agent NAME Agent identifier (default: agent)")
    console.print(
        " --model MODEL Model to use (e.g., claude-sonnet-4-5-20250929, gpt-4o)"
    )
    console.print(" --auto-approve Auto-approve tool usage without prompting")
    console.print(
        " --sandbox TYPE Remote sandbox for execution (modal, runloop, daytona)"
    )
    console.print(" --sandbox-id ID Reuse existing sandbox (skips creation/cleanup)")
    console.print(
        " -r, --resume [ID] Resume thread: -r for most recent, -r <ID> for specific"
    )
    console.print()

    console.print("[bold]예시:[/bold]", style=COLORS["primary"])
    console.print(" deepagents # 기본 에이전트로 시작", style=COLORS["dim"])
    console.print("[bold]Examples:[/bold]", style=COLORS["primary"])
    console.print(
        " deepagents --agent mybot # 'mybot'이라는 이름의 에이전트로 시작",
        " deepagents # Start with default agent", style=COLORS["dim"]
    )
    console.print(
        " deepagents --agent mybot # Start with agent named 'mybot'",
        style=COLORS["dim"],
    )
    console.print(
        " deepagents --model gpt-4o # 특정 모델 사용 (공급자 자동 감지)",
        " deepagents --model gpt-4o # Use specific model (auto-detects provider)",
        style=COLORS["dim"],
    )
    console.print(
        " deepagents --auto-approve # 자동 승인이 활성화된 상태로 시작",
        " deepagents -r # Resume most recent session",
        style=COLORS["dim"],
    )
    console.print(
        " deepagents --sandbox runloop # Runloop 샌드박스에서 코드 실행",
        " deepagents -r abc123 # Resume specific thread",
        style=COLORS["dim"],
    )
    console.print(
        " deepagents --sandbox modal # Modal 샌드박스에서 코드 실행",
        " deepagents --auto-approve # Start with auto-approve enabled",
        style=COLORS["dim"],
    )
    console.print(
        " deepagents --sandbox runloop --sandbox-id dbx_123 # 기존 샌드박스 재사용",
        style=COLORS["dim"],
    )
    console.print(" deepagents list # 모든 에이전트 나열", style=COLORS["dim"])
    console.print(" deepagents reset --agent mybot # mybot을 기본값으로 초기화", style=COLORS["dim"])
    console.print(
        " deepagents reset --agent mybot --target other # mybot을 'other' 에이전트의 복사본으로 초기화",
        " deepagents --sandbox runloop # Execute code in Runloop sandbox",
        style=COLORS["dim"],
    )
    console.print()

    console.print("[bold]장기 기억(Long-term Memory):[/bold]", style=COLORS["primary"])
    console.print(" 기본적으로 장기 기억은 'agent'라는 에이전트 이름을 사용하여 활성화됩니다.", style=COLORS["dim"])
    console.print(" 기억에는 다음이 포함됩니다:", style=COLORS["dim"])
    console.print(" - 지침이 포함된 영구 agent.md 파일", style=COLORS["dim"])
    console.print(" - 세션 간 컨텍스트 저장을 위한 /memories/ 폴더", style=COLORS["dim"])
    console.print()

    console.print("[bold]에이전트 저장소:[/bold]", style=COLORS["primary"])
    console.print(" 에이전트는 다음 경로에 저장됩니다: ~/.deepagents/AGENT_NAME/", style=COLORS["dim"])
    console.print(" 각 에이전트에는 프롬프트가 포함된 agent.md 파일이 있습니다", style=COLORS["dim"])
    console.print()

    console.print("[bold]대화형 기능:[/bold]", style=COLORS["primary"])
    console.print(" Enter 메시지 제출", style=COLORS["dim"])
    console.print("[bold]Thread Management:[/bold]", style=COLORS["primary"])
    # NOTE: this used to be a single merge-mangled call with a positional
    # argument after style= (SyntaxError); split into two valid calls.
    console.print(
        " Alt+Enter 여러 줄 입력을 위한 줄바꿈 (Option+Enter 또는 ESC 후 Enter)",
        style=COLORS["dim"],
    )
    console.print(
        " deepagents threads list # List all sessions", style=COLORS["dim"]
    )
    console.print(
        " deepagents threads delete <ID> # Delete a session", style=COLORS["dim"]
    )
    console.print(" Ctrl+J 줄바꿈 삽입 (대안)", style=COLORS["dim"])
    console.print(" Ctrl+T 자동 승인 모드 전환", style=COLORS["dim"])
    console.print(" 방향키 입력 탐색", style=COLORS["dim"])
    console.print(" @filename @를 입력하여 파일 자동 완성 및 콘텐츠 주입", style=COLORS["dim"])
    console.print(" /command /를 입력하여 사용 가능한 명령 확인 (자동 완성)", style=COLORS["dim"])
    console.print()

    console.print("[bold]대화형 명령:[/bold]", style=COLORS["primary"])
    console.print(" /help 사용 가능한 명령 및 기능 표시", style=COLORS["dim"])
    console.print(" /clear 화면 지우기 및 대화 초기화", style=COLORS["dim"])
    console.print(" /tokens 현재 세션의 토큰 사용량 표시", style=COLORS["dim"])
    console.print(" /quit, /exit 세션 종료", style=COLORS["dim"])
    console.print(" quit, exit, q 세션 종료 (입력하고 Enter 누름)", style=COLORS["dim"])
    console.print("[bold]Interactive Features:[/bold]", style=COLORS["primary"])
    console.print(" Enter Submit your message", style=COLORS["dim"])
    console.print(" Ctrl+J Insert newline", style=COLORS["dim"])
    console.print(" Shift+Tab Toggle auto-approve mode", style=COLORS["dim"])
    console.print(" @filename Auto-complete files and inject content", style=COLORS["dim"])
    console.print(" /command Slash commands (/help, /clear, /quit)", style=COLORS["dim"])
    console.print(" !command Run bash commands directly", style=COLORS["dim"])
    console.print()
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
"""Textual widgets for deepagents-cli."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from deepagents_cli.widgets.chat_input import ChatInput
|
||||
from deepagents_cli.widgets.messages import (
|
||||
AssistantMessage,
|
||||
DiffMessage,
|
||||
ErrorMessage,
|
||||
SystemMessage,
|
||||
ToolCallMessage,
|
||||
UserMessage,
|
||||
)
|
||||
from deepagents_cli.widgets.status import StatusBar
|
||||
from deepagents_cli.widgets.welcome import WelcomeBanner
|
||||
|
||||
# Public API: every widget re-exported at package level, sorted alphabetically.
__all__ = [
    "AssistantMessage",
    "ChatInput",
    "DiffMessage",
    "ErrorMessage",
    "StatusBar",
    "SystemMessage",
    "ToolCallMessage",
    "UserMessage",
    "WelcomeBanner",
]
|
||||
@@ -0,0 +1,199 @@
|
||||
"""Approval widget for HITL - using standard Textual patterns."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import Any, ClassVar
|
||||
|
||||
from textual import events
|
||||
from textual.app import ComposeResult
|
||||
from textual.binding import Binding, BindingType
|
||||
from textual.containers import Container, Vertical, VerticalScroll
|
||||
from textual.message import Message
|
||||
from textual.widgets import Static
|
||||
|
||||
from deepagents_cli.widgets.tool_renderers import get_renderer
|
||||
|
||||
|
||||
class ApprovalMenu(Container):
    """Approval menu using standard Textual patterns.

    Shows a pending tool call plus three choices (approve / reject /
    auto-approve) and reports the user's decision both as a posted
    ``Decided`` message and by resolving an optional asyncio future.

    Key design decisions (following mistral-vibe reference):
    - Container base class with compose()
    - BINDINGS for key handling (not on_key)
    - can_focus_children = False to prevent focus theft
    - Simple Static widgets for options
    - Standard message posting
    - Tool-specific widgets via renderer pattern
    """

    # The menu itself takes focus; children are excluded so key presses
    # always reach the BINDINGS below.
    can_focus = True
    can_focus_children = False

    # CSS is in app.tcss - no DEFAULT_CSS needed

    # Navigation (arrows + vim keys) and quick-select keys (digits, y/n/a);
    # all hidden from the footer (show=False).
    BINDINGS: ClassVar[list[BindingType]] = [
        Binding("up", "move_up", "Up", show=False),
        Binding("k", "move_up", "Up", show=False),
        Binding("down", "move_down", "Down", show=False),
        Binding("j", "move_down", "Down", show=False),
        Binding("enter", "select", "Select", show=False),
        Binding("1", "select_approve", "Approve", show=False),
        Binding("y", "select_approve", "Approve", show=False),
        Binding("2", "select_reject", "Reject", show=False),
        Binding("n", "select_reject", "Reject", show=False),
        Binding("3", "select_auto", "Auto-approve", show=False),
        Binding("a", "select_auto", "Auto-approve", show=False),
    ]

    class Decided(Message):
        """Message sent when user makes a decision."""

        def __init__(self, decision: dict[str, str]) -> None:
            """Store the decision payload, e.g. ``{"type": "approve"}``."""
            super().__init__()
            self.decision = decision

    def __init__(
        self,
        action_request: dict[str, Any],
        assistant_id: str | None = None,
        id: str | None = None,  # noqa: A002
        **kwargs: Any,
    ) -> None:
        """Initialize the menu for one pending tool call.

        Args:
            action_request: Pending tool call; the keys "name", "args", and
                "description" are read, each with a fallback default.
            assistant_id: Identifier of the requesting assistant, if any.
            id: Widget DOM id (defaults to "approval-menu").
            **kwargs: Forwarded to the Container constructor.
        """
        super().__init__(id=id or "approval-menu", classes="approval-menu", **kwargs)
        self._action_request = action_request
        self._assistant_id = assistant_id
        self._tool_name = action_request.get("name", "unknown")
        self._tool_args = action_request.get("args", {})
        self._description = action_request.get("description", "")
        # Index of the highlighted option: 0=approve, 1=reject, 2=auto-approve.
        self._selected = 0
        # Optional future resolved with the decision dict (see set_future).
        self._future: asyncio.Future[dict[str, str]] | None = None
        self._option_widgets: list[Static] = []
        self._tool_info_container: Vertical | None = None

    def set_future(self, future: asyncio.Future[dict[str, str]]) -> None:
        """Set the future to resolve when user decides."""
        self._future = future

    def compose(self) -> ComposeResult:
        """Compose the widget with Static children.

        Layout prioritizes options visibility - they appear at the top so users
        always see them even in small terminals.
        """
        # Title
        yield Static(
            f">>> {self._tool_name} Requires Approval <<<",
            classes="approval-title",
        )

        # Options container FIRST - always visible at top
        with Container(classes="approval-options-container"):
            # Options - create 3 Static widgets
            for i in range(3):
                widget = Static("", classes="approval-option")
                self._option_widgets.append(widget)
                yield widget

        # Help text right after options
        yield Static(
            "↑/↓ navigate • Enter select • y/n/a quick keys",
            classes="approval-help",
        )

        # Separator between options and tool details
        yield Static("─" * 40, classes="approval-separator")

        # Tool info in scrollable container BELOW options
        with VerticalScroll(classes="tool-info-scroll"):
            self._tool_info_container = Vertical(classes="tool-info-container")
            yield self._tool_info_container

    async def on_mount(self) -> None:
        """Focus self on mount and update tool info."""
        await self._update_tool_info()
        self._update_options()
        self.focus()

    async def _update_tool_info(self) -> None:
        """Mount the tool-specific approval widget."""
        if not self._tool_info_container:
            return

        # Get the appropriate renderer for this tool
        renderer = get_renderer(self._tool_name)
        widget_class, data = renderer.get_approval_widget(self._tool_args)

        # Clear existing content and mount new widget
        await self._tool_info_container.remove_children()
        approval_widget = widget_class(data)
        await self._tool_info_container.mount(approval_widget)

    def _update_options(self) -> None:
        """Update option widgets based on selection."""
        options = [
            "1. Approve (y)",
            "2. Reject (n)",
            "3. Auto-approve all this session (a)",
        ]

        # "›" marks the highlighted row; the selected row also gets a CSS class.
        for i, (text, widget) in enumerate(zip(options, self._option_widgets, strict=True)):
            cursor = "› " if i == self._selected else "  "
            widget.update(f"{cursor}{text}")

            # Update classes
            widget.remove_class("approval-option-selected")
            if i == self._selected:
                widget.add_class("approval-option-selected")

    def action_move_up(self) -> None:
        """Move selection up."""
        self._selected = (self._selected - 1) % 3
        self._update_options()

    def action_move_down(self) -> None:
        """Move selection down."""
        self._selected = (self._selected + 1) % 3
        self._update_options()

    def action_select(self) -> None:
        """Select current option."""
        self._handle_selection(self._selected)

    def action_select_approve(self) -> None:
        """Select approve option."""
        self._selected = 0
        self._update_options()
        self._handle_selection(0)

    def action_select_reject(self) -> None:
        """Select reject option."""
        self._selected = 1
        self._update_options()
        self._handle_selection(1)

    def action_select_auto(self) -> None:
        """Select auto-approve option."""
        self._selected = 2
        self._update_options()
        self._handle_selection(2)

    def _handle_selection(self, option: int) -> None:
        """Handle the selected option.

        Resolves the pending future (if any and not already done) and posts
        a ``Decided`` message so the parent app can react either way.
        """
        decision_map = {
            0: "approve",
            1: "reject",
            2: "auto_approve_all",
        }
        decision = {"type": decision_map[option]}

        # Resolve the future
        if self._future and not self._future.done():
            self._future.set_result(decision)

        # Post message
        self.post_message(self.Decided(decision))

    def on_blur(self, event: events.Blur) -> None:
        """Re-focus on blur to keep focus trapped."""
        self.call_after_refresh(self.focus)
|
||||
@@ -0,0 +1,522 @@
|
||||
"""Autocomplete system for @ mentions and / commands.
|
||||
|
||||
This is a custom implementation that handles trigger-based completion
|
||||
for slash commands (/) and file mentions (@).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import subprocess
|
||||
from difflib import SequenceMatcher
|
||||
from enum import StrEnum
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Protocol
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from textual import events
|
||||
|
||||
|
||||
class CompletionResult(StrEnum):
    """Result of handling a key event in the completion system.

    Returned by CompletionController.on_key so the input widget knows
    whether to suppress the key, pass it through, or submit the input.
    """

    IGNORED = "ignored"  # Key not handled, let default behavior proceed
    HANDLED = "handled"  # Key handled, prevent default
    SUBMIT = "submit"  # Key triggers submission (e.g., Enter on slash command)
|
||||
|
||||
|
||||
class CompletionView(Protocol):
    """Protocol for views that can display completion suggestions.

    Implemented by the input widget: controllers call back into it to
    show/hide the suggestion popup and to splice an accepted completion
    into the input text.
    """

    def render_completion_suggestions(
        self, suggestions: list[tuple[str, str]], selected_index: int
    ) -> None:
        """Render the completion suggestions popup.

        Args:
            suggestions: List of (label, description) tuples
            selected_index: Index of currently selected item
        """
        ...

    def clear_completion_suggestions(self) -> None:
        """Hide/clear the completion suggestions popup."""
        ...

    def replace_completion_range(self, start: int, end: int, replacement: str) -> None:
        """Replace text in the input from start to end with replacement.

        Args:
            start: Start index in the input text
            end: End index in the input text
            replacement: Text to insert
        """
        ...
|
||||
|
||||
|
||||
class CompletionController(Protocol):
    """Protocol for completion controllers.

    A controller owns one trigger style (e.g. "/" commands or "@" file
    mentions): it decides whether it applies to the current input, keeps its
    suggestion list in sync with the text, and consumes navigation keys.
    """

    def can_handle(self, text: str, cursor_index: int) -> bool:
        """Check if this controller can handle the current input state."""
        ...

    def on_text_changed(self, text: str, cursor_index: int) -> None:
        """Called when input text changes."""
        ...

    def on_key(self, event: events.Key, text: str, cursor_index: int) -> CompletionResult:
        """Handle a key event. Returns how the event was handled."""
        ...

    def reset(self) -> None:
        """Reset/clear the completion state."""
        ...
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Slash Command Completion
|
||||
# ============================================================================
|
||||
|
||||
# Built-in slash commands with descriptions, shown in the completion popup.
SLASH_COMMANDS: list[tuple[str, str]] = [
    ("/help", "Show help"),
    ("/clear", "Clear chat and start new session"),
    ("/quit", "Exit app"),
    ("/exit", "Exit app"),
    ("/tokens", "Token usage"),
    ("/threads", "Show session info"),
]

# Maximum number of suggestion rows rendered at once (applies to both
# slash-command and file completion).
MAX_SUGGESTIONS = 10
|
||||
|
||||
|
||||
class SlashCommandController:
    """Completion controller for inputs that begin with a slash command."""

    def __init__(
        self,
        commands: list[tuple[str, str]],
        view: CompletionView,
    ) -> None:
        """Initialize the slash command controller.

        Args:
            commands: List of (command, description) tuples
            view: View to render suggestions to
        """
        self._commands = commands
        self._view = view
        self._suggestions: list[tuple[str, str]] = []
        self._selected_index = 0

    def can_handle(self, text: str, cursor_index: int) -> bool:  # noqa: ARG002
        """Return True for any input whose first character is '/'."""
        return text[:1] == "/"

    def reset(self) -> None:
        """Drop the active suggestion list and hide the popup."""
        if not self._suggestions:
            return
        self._suggestions.clear()
        self._selected_index = 0
        self._view.clear_completion_suggestions()

    def on_text_changed(self, text: str, cursor_index: int) -> None:
        """Recompute the suggestion list for the current input state."""
        within_bounds = 0 <= cursor_index <= len(text)
        if not (within_bounds and self.can_handle(text, cursor_index)):
            self.reset()
            return

        # Everything between the leading '/' and the cursor is the query.
        query = text[1:cursor_index].lower()
        matched = [
            entry
            for entry in self._commands
            if entry[0].lower().startswith("/" + query)
        ][:MAX_SUGGESTIONS]

        if not matched:
            self.reset()
            return

        self._suggestions = matched
        self._selected_index = 0
        self._view.render_completion_suggestions(self._suggestions, self._selected_index)

    def on_key(
        self, event: events.Key, _text: str, cursor_index: int
    ) -> CompletionResult:
        """Translate navigation/selection keys into completion actions."""
        if not self._suggestions:
            return CompletionResult.IGNORED

        key = event.key
        if key == "tab":
            applied = self._apply_selected_completion(cursor_index)
            return CompletionResult.HANDLED if applied else CompletionResult.IGNORED
        if key == "enter":
            # Enter both accepts the completion and asks the app to submit.
            applied = self._apply_selected_completion(cursor_index)
            return CompletionResult.SUBMIT if applied else CompletionResult.HANDLED
        if key in ("up", "down"):
            self._move_selection(1 if key == "down" else -1)
            return CompletionResult.HANDLED
        if key == "escape":
            self.reset()
            return CompletionResult.HANDLED
        return CompletionResult.IGNORED

    def _move_selection(self, delta: int) -> None:
        """Advance the highlighted row, wrapping at either end."""
        if not self._suggestions:
            return
        self._selected_index = (self._selected_index + delta) % len(self._suggestions)
        self._view.render_completion_suggestions(self._suggestions, self._selected_index)

    def _apply_selected_completion(self, cursor_index: int) -> bool:
        """Splice the highlighted command into the input; True on success."""
        if not self._suggestions:
            return False

        command = self._suggestions[self._selected_index][0]
        # The command replaces everything from the start of input to the cursor.
        self._view.replace_completion_range(0, cursor_index, command)
        self.reset()
        return True
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Fuzzy File Completion (from project root)
|
||||
# ============================================================================
|
||||
|
||||
# Constants for fuzzy file completion
_MAX_FALLBACK_FILES = 1000  # cap on files collected by the glob fallback
_MIN_FUZZY_RATIO = 0.4  # SequenceMatcher ratio below this is not a filename match
_MIN_FUZZY_SCORE = 15  # Minimum score to include in results
|
||||
|
||||
|
||||
def _find_project_root(start_path: Path) -> Path:
    """Return the nearest ancestor containing a ``.git`` entry.

    Walks from the resolved *start_path* upward; if no ``.git`` is found
    anywhere, the original (unresolved) *start_path* is returned.
    """
    resolved = start_path.resolve()
    for candidate in (resolved, *resolved.parents):
        if (candidate / ".git").exists():
            return candidate
    return start_path
|
||||
|
||||
|
||||
def _get_project_files(root: Path) -> list[str]:
    """Get project files using git ls-files or fallback to glob.

    Args:
        root: Directory treated as the project root.

    Returns:
        File paths relative to ``root``. ``git ls-files`` output is returned
        as-is; the glob fallback is depth-limited, capped at
        ``_MAX_FALLBACK_FILES`` entries, and excludes dotfile paths.
    """
    try:
        result = subprocess.run(
            ["git", "ls-files"],  # noqa: S607
            cwd=root,
            capture_output=True,
            text=True,
            timeout=5,
            check=False,
        )
        if result.returncode == 0:
            # Filter empty strings (a repo with no files yields one "" line).
            return [f for f in result.stdout.strip().split("\n") if f]
    except (subprocess.TimeoutExpired, FileNotFoundError, OSError):
        pass

    # Fallback: simple glob (limited depth to avoid slowness)
    files: list[str] = []
    try:
        for pattern in ["*", "*/*", "*/*/*", "*/*/*/*"]:
            for p in root.glob(pattern):
                if not p.is_file():
                    continue
                rel = p.relative_to(root)
                # BUGFIX: filter on the parts *relative to root*. The previous
                # check used p.parts (the absolute path), so a project rooted
                # under any dot-directory (e.g. ~/.config/proj) matched the
                # filter and the fallback returned no files at all.
                if any(part.startswith(".") for part in rel.parts):
                    continue
                files.append(str(rel))
                if len(files) >= _MAX_FALLBACK_FILES:
                    return files
    except OSError:
        pass
    return files
|
||||
|
||||
|
||||
def _fuzzy_score(query: str, candidate: str) -> float:  # noqa: PLR0911
    """Score *candidate* against *query*; larger means a better match."""
    q = query.lower()
    full = candidate.lower()

    # The filename (final path component) outranks matches elsewhere in
    # the path.
    slash = full.rfind("/")
    fname = full[slash + 1 :]
    fname_start = slash + 1

    # Shorter candidates win ties at the same tier.
    tiebreak = 1 / len(candidate)

    # Tier 1: substring of the filename.
    idx = fname.find(q)
    if idx >= 0:
        if idx == 0:
            return 150 + tiebreak  # starts the filename
        if fname[idx - 1] in "_-.":
            return 120 + tiebreak  # starts at a word boundary
        return 100 + tiebreak

    # Tier 2: substring of the full path.
    idx = full.find(q)
    if idx >= 0:
        if idx == fname_start:
            return 80 + tiebreak  # starts the filename component
        if idx == 0 or candidate[idx - 1] in "/_-.":
            return 60 + tiebreak  # starts at a path word boundary
        return 40 + tiebreak

    # Tier 3: fuzzy similarity, filename first (more relevant), then full path.
    fname_ratio = SequenceMatcher(None, q, fname).ratio()
    if fname_ratio > _MIN_FUZZY_RATIO:
        return fname_ratio * 30
    return SequenceMatcher(None, q, full).ratio() * 15
|
||||
|
||||
|
||||
def _is_dotpath(path: str) -> bool:
    """Check if path contains dotfiles/dotdirs (e.g., .github/...)."""
    for component in path.split("/"):
        if component.startswith("."):
            return True
    return False
|
||||
|
||||
|
||||
def _path_depth(path: str) -> int:
    """Return the nesting depth of *path* (its number of '/' separators)."""
    return sum(1 for ch in path if ch == "/")
|
||||
|
||||
|
||||
def _fuzzy_search(
    query: str, candidates: list[str], limit: int = 10, *, include_dotfiles: bool = False
) -> list[str]:
    """Return up to *limit* candidates best matching *query*, best first.

    Args:
        query: Search query; when empty, shallow paths are listed first.
        candidates: List of file paths to search
        limit: Max results to return
        include_dotfiles: Whether to include dotfiles (default False)
    """
    # Filter dotfiles unless explicitly searching for them.
    if include_dotfiles:
        pool = candidates
    else:
        pool = [path for path in candidates if not _is_dotpath(path)]

    if not query:
        # No query yet: prefer shallow paths, alphabetical within a depth.
        ordered = sorted(pool, key=lambda p: (_path_depth(p), p.lower()))
        return ordered[:limit]

    ranked: list[tuple[float, str]] = []
    for path in pool:
        score = _fuzzy_score(query, path)
        if score >= _MIN_FUZZY_SCORE:
            ranked.append((score, path))
    # reverse=True keeps insertion order for equal scores (stable sort),
    # matching a sort on the negated score.
    ranked.sort(key=lambda item: item[0], reverse=True)
    return [path for _, path in ranked[:limit]]
|
||||
|
||||
|
||||
class FuzzyFileController:
    """Drive @-triggered file completion, fuzzy-matched from the project root."""

    def __init__(
        self,
        view: CompletionView,
        cwd: Path | None = None,
    ) -> None:
        """Set up the controller.

        Args:
            view: View that suggestions are rendered to
            cwd: Starting directory used to locate the project root
        """
        self._view = view
        self._cwd = cwd or Path.cwd()
        self._project_root = _find_project_root(self._cwd)
        self._suggestions: list[tuple[str, str]] = []
        self._selected_index = 0
        self._file_cache: list[str] | None = None

    def _get_files(self) -> list[str]:
        """Return the cached project file list, scanning on first use."""
        if self._file_cache is None:
            self._file_cache = _get_project_files(self._project_root)
        return self._file_cache

    def refresh_cache(self) -> None:
        """Invalidate the file cache so the next lookup rescans the project."""
        self._file_cache = None

    def can_handle(self, text: str, cursor_index: int) -> bool:
        """Return True when the cursor sits in a space-free @fragment."""
        if not (0 < cursor_index <= len(text)):
            return False

        head = text[:cursor_index]
        marker = head.rfind("@")
        if marker < 0 or cursor_index <= marker:
            return False

        # Everything from @ up to the cursor must be one space-free token.
        token = head[marker:cursor_index]
        return bool(token) and " " not in token

    def reset(self) -> None:
        """Drop all suggestions and hide the completion view."""
        if self._suggestions:
            self._suggestions.clear()
            self._selected_index = 0
            self._view.clear_completion_suggestions()

    def on_text_changed(self, text: str, cursor_index: int) -> None:
        """Recompute suggestions for the @fragment under the cursor."""
        if not self.can_handle(text, cursor_index):
            self.reset()
            return

        head = text[:cursor_index]
        query = head[head.rfind("@") + 1 :]
        found = self._get_fuzzy_suggestions(query)

        if not found:
            self.reset()
            return

        self._suggestions = found
        self._selected_index = 0
        self._view.render_completion_suggestions(self._suggestions, self._selected_index)

    def _get_fuzzy_suggestions(self, search: str) -> list[tuple[str, str]]:
        """Fuzzy-match *search* against project files, labelled with a type hint."""
        # Dotfiles are only offered when the query itself starts with ".".
        matches = _fuzzy_search(
            search,
            self._get_files(),
            limit=MAX_SUGGESTIONS,
            include_dotfiles=search.startswith("."),
        )

        def _hint(path: str) -> str:
            # The file extension doubles as a cheap type hint in the popup.
            suffix = Path(path).suffix.lower()
            return suffix[1:] if suffix else "file"

        return [(f"@{path}", _hint(path)) for path in matches]

    def on_key(  # noqa: PLR0911
        self, event: events.Key, text: str, cursor_index: int
    ) -> CompletionResult:
        """Navigate, accept, or dismiss the suggestion list."""
        if not self._suggestions:
            return CompletionResult.IGNORED

        key = event.key
        if key in ("tab", "enter"):
            applied = self._apply_selected_completion(text, cursor_index)
            return CompletionResult.HANDLED if applied else CompletionResult.IGNORED
        if key == "down":
            self._move_selection(1)
            return CompletionResult.HANDLED
        if key == "up":
            self._move_selection(-1)
            return CompletionResult.HANDLED
        if key == "escape":
            self.reset()
            return CompletionResult.HANDLED
        return CompletionResult.IGNORED

    def _move_selection(self, delta: int) -> None:
        """Shift the highlighted suggestion by *delta*, wrapping around."""
        if not self._suggestions:
            return
        self._selected_index = (self._selected_index + delta) % len(self._suggestions)
        self._view.render_completion_suggestions(self._suggestions, self._selected_index)

    def _apply_selected_completion(self, text: str, cursor_index: int) -> bool:
        """Replace the @fragment with the highlighted suggestion.

        Returns:
            True when a replacement was applied, False otherwise.
        """
        if not self._suggestions:
            return False

        label, _ = self._suggestions[self._selected_index]
        marker = text[:cursor_index].rfind("@")
        if marker < 0:
            return False

        # Swap everything between @ and the cursor for the chosen label.
        self._view.replace_completion_range(marker, cursor_index, label)
        self.reset()
        return True
||||
|
||||
|
||||
# Deprecated alias kept so existing imports keep working; new code should
# use FuzzyFileController directly.
PathCompletionController = FuzzyFileController
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Multi-Completion Manager
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class MultiCompletionManager:
    """Route text and key events to the first completion controller that applies."""

    def __init__(self, controllers: list[CompletionController]) -> None:
        """Store *controllers*; earlier entries win when several could handle input.

        Args:
            controllers: List of completion controllers (checked in order)
        """
        self._controllers = controllers
        self._active: CompletionController | None = None

    def _deactivate(self) -> None:
        """Reset and forget the currently active controller, if any."""
        if self._active is not None:
            self._active.reset()
            self._active = None

    def on_text_changed(self, text: str, cursor_index: int) -> None:
        """Activate the first capable controller and forward the change to it."""
        candidate = next(
            (c for c in self._controllers if c.can_handle(text, cursor_index)),
            None,
        )

        # Nobody wants this input: tear down whatever was active.
        if candidate is None:
            self._deactivate()
            return

        # Switching controllers: clear the old one's UI state first.
        if candidate is not self._active:
            if self._active is not None:
                self._active.reset()
            self._active = candidate

        candidate.on_text_changed(text, cursor_index)

    def on_key(self, event: events.Key, text: str, cursor_index: int) -> CompletionResult:
        """Delegate the key event to the active controller, if one exists."""
        if self._active is None:
            return CompletionResult.IGNORED
        return self._active.on_key(event, text, cursor_index)

    def reset(self) -> None:
        """Reset the active controller and clear the active slot."""
        self._deactivate()
|
||||
@@ -0,0 +1,537 @@
|
||||
"""Chat input widget for deepagents-cli with autocomplete and history support."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any, ClassVar
|
||||
|
||||
from rich.text import Text
|
||||
from textual import events # noqa: TC002 - used at runtime in _on_key
|
||||
from textual.binding import Binding
|
||||
from textual.containers import Horizontal, Vertical
|
||||
from textual.message import Message
|
||||
from textual.reactive import reactive
|
||||
from textual.widgets import Static, TextArea
|
||||
|
||||
from deepagents_cli.widgets.autocomplete import (
|
||||
SLASH_COMMANDS,
|
||||
CompletionResult,
|
||||
FuzzyFileController,
|
||||
MultiCompletionManager,
|
||||
SlashCommandController,
|
||||
)
|
||||
from deepagents_cli.widgets.history import HistoryManager
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from textual.app import ComposeResult
|
||||
|
||||
|
||||
class CompletionPopup(Static):
    """Popup that lists completion suggestions, highlighting the selected one."""

    DEFAULT_CSS = """
    CompletionPopup {
        display: none;
    }
    """

    def __init__(self, **kwargs: Any) -> None:
        """Create the popup; it never takes keyboard focus itself."""
        super().__init__("", **kwargs)
        self.can_focus = False

    def update_suggestions(self, suggestions: list[tuple[str, str]], selected_index: int) -> None:
        """Render *suggestions*, one per line, emphasising *selected_index*."""
        if not suggestions:
            self.hide()
            return

        body = Text()
        for pos, (label, description) in enumerate(suggestions):
            if pos:
                body.append("\n")

            # The selected row is rendered inverted; others stay plain.
            chosen = pos == selected_index
            body.append(label, style="bold reverse" if chosen else "bold")
            if description:
                body.append(" ")
                body.append(description, style="italic" if chosen else "dim")

        self.update(body)
        self.show()

    def hide(self) -> None:
        """Blank the popup content and remove it from the layout."""
        self.update("")
        self.styles.display = "none"

    def show(self) -> None:
        """Make the popup visible."""
        self.styles.display = "block"
|
||||
|
||||
|
||||
class ChatTextArea(TextArea):
    """TextArea subclass with custom key handling for chat input.

    Adds chat semantics on top of TextArea: plain Enter submits, modified
    Enter inserts a newline, and Up/Down at the text edges request history
    navigation from the parent widget via messages.
    """

    # Priority bindings so these fire before TextArea's built-in key handling.
    BINDINGS: ClassVar[list[Binding]] = [
        Binding(
            "shift+enter,ctrl+j,alt+enter,ctrl+enter",
            "insert_newline",
            "New Line",
            show=False,
            priority=True,
        ),
        Binding(
            "ctrl+a",
            "select_all_text",
            "Select All",
            show=False,
            priority=True,
        ),
        # Mac Cmd+Z/Cmd+Shift+Z for undo/redo (in addition to Ctrl+Z/Y)
        Binding("cmd+z,super+z", "undo", "Undo", show=False, priority=True),
        Binding("cmd+shift+z,super+shift+z", "redo", "Redo", show=False, priority=True),
    ]

    class Submitted(Message):
        """Message sent when text is submitted."""

        def __init__(self, value: str) -> None:
            """Initialize with submitted value."""
            self.value = value
            super().__init__()

    class HistoryPrevious(Message):
        """Request previous history entry."""

        def __init__(self, current_text: str) -> None:
            """Initialize with current text for saving."""
            self.current_text = current_text
            super().__init__()

    class HistoryNext(Message):
        """Request next history entry."""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the chat text area."""
        # Remove placeholder if passed, TextArea doesn't support it the same way
        kwargs.pop("placeholder", None)
        super().__init__(**kwargs)
        # True while text is being replaced by history navigation, so the
        # parent can suppress completion updates for programmatic changes.
        self._navigating_history = False
        # True while the completion popup is showing; makes _on_key yield
        # navigation keys to the parent instead of handling them here.
        self._completion_active = False
        self._app_has_focus = True

    def set_app_focus(self, *, has_focus: bool) -> None:
        """Set whether the app should show the cursor as active.

        When has_focus=False (e.g., agent is running), disables cursor blink
        so the cursor doesn't flash while waiting for a response.
        """
        self._app_has_focus = has_focus
        self.cursor_blink = has_focus
        if has_focus and not self.has_focus:
            # Defer focusing until after the next refresh cycle.
            self.call_after_refresh(self.focus)

    def set_completion_active(self, *, active: bool) -> None:
        """Set whether completion suggestions are visible."""
        self._completion_active = active

    def action_insert_newline(self) -> None:
        """Insert a newline character."""
        self.insert("\n")

    def action_select_all_text(self) -> None:
        """Select all text in the text area."""
        if not self.text:
            return
        # Select from start to end
        lines = self.text.split("\n")
        end_row = len(lines) - 1
        end_col = len(lines[end_row])
        self.selection = ((0, 0), (end_row, end_col))

    async def _on_key(self, event: events.Key) -> None:
        """Handle key events.

        Order matters here: newline modifiers first, then the
        completion-active escape hatch, then submit, then history edges;
        everything else falls through to TextArea's default handling.
        """
        # Modifier+Enter inserts newline (Ctrl+J is most reliable across terminals)
        if event.key in ("shift+enter", "ctrl+j", "alt+enter", "ctrl+enter"):
            event.prevent_default()
            event.stop()
            self.insert("\n")
            return

        # If completion is active, let parent handle navigation keys
        if self._completion_active and event.key in ("up", "down", "tab", "enter"):
            # Prevent TextArea's default behavior (e.g., Enter inserting newline)
            # but let event bubble to ChatInput for completion handling
            event.prevent_default()
            return

        # Plain Enter submits
        if event.key == "enter":
            event.prevent_default()
            event.stop()
            value = self.text.strip()
            if value:
                self.post_message(self.Submitted(value))
            return

        # Up arrow on first line = history previous
        if event.key == "up":
            row, _ = self.cursor_location
            if row == 0:
                event.prevent_default()
                event.stop()
                self._navigating_history = True
                self.post_message(self.HistoryPrevious(self.text))
                return

        # Down arrow on last line = history next
        if event.key == "down":
            row, _ = self.cursor_location
            total_lines = self.text.count("\n") + 1
            if row == total_lines - 1:
                event.prevent_default()
                event.stop()
                self._navigating_history = True
                self.post_message(self.HistoryNext())
                return

        await super()._on_key(event)

    def set_text_from_history(self, text: str) -> None:
        """Set text from history navigation.

        The _navigating_history flag is raised around the assignment so the
        parent's change handler can tell this apart from user typing.
        """
        self._navigating_history = True
        self.text = text
        # Move cursor to end
        lines = text.split("\n")
        last_row = len(lines) - 1
        last_col = len(lines[last_row])
        self.move_cursor((last_row, last_col))
        self._navigating_history = False

    def clear_text(self) -> None:
        """Clear the text area and reset the cursor to the origin."""
        self.text = ""
        self.move_cursor((0, 0))
|
||||
|
||||
|
||||
class ChatInput(Vertical):
    """Chat input widget with prompt indicator, multi-line text, autocomplete, and history.

    Features:
    - Multi-line input with TextArea
    - Enter to submit, Ctrl+J for newlines (most reliable across terminals)
    - Up/Down arrows for command history on first/last line
    - Autocomplete for @ (files) and / (commands)
    """

    DEFAULT_CSS = """
    ChatInput {
        height: auto;
        min-height: 3;
        max-height: 12;
        padding: 0;
        background: $surface;
        border: solid $primary;
    }

    ChatInput .input-row {
        height: auto;
        width: 100%;
    }

    ChatInput .input-prompt {
        width: 3;
        height: 1;
        padding: 0 1;
        color: $primary;
        text-style: bold;
    }

    ChatInput ChatTextArea {
        width: 1fr;
        height: auto;
        min-height: 1;
        max-height: 8;
        border: none;
        background: transparent;
        padding: 0;
    }

    ChatInput ChatTextArea:focus {
        border: none;
    }
    """

    class Submitted(Message):
        """Message sent when input is submitted."""

        def __init__(self, value: str, mode: str = "normal") -> None:
            """Initialize with value and mode."""
            super().__init__()
            self.value = value
            self.mode = mode

    class ModeChanged(Message):
        """Message sent when input mode changes."""

        def __init__(self, mode: str) -> None:
            """Initialize with new mode."""
            super().__init__()
            self.mode = mode

    # Current input mode: "normal", "bash" (leading "!"), or "command" (leading "/").
    mode: reactive[str] = reactive("normal")

    def __init__(
        self,
        cwd: str | Path | None = None,
        history_file: Path | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize the chat input widget.

        Args:
            cwd: Current working directory for file completion
            history_file: Path to history file (default: ~/.deepagents/history.jsonl)
            **kwargs: Additional arguments for parent
        """
        super().__init__(**kwargs)
        self._cwd = Path(cwd) if cwd else Path.cwd()
        # Child widgets are resolved in on_mount(), after compose() has run.
        self._text_area: ChatTextArea | None = None
        self._popup: CompletionPopup | None = None
        self._completion_manager: MultiCompletionManager | None = None

        # Set up history manager
        if history_file is None:
            history_file = Path.home() / ".deepagents" / "history.jsonl"
        self._history = HistoryManager(history_file)

    def compose(self) -> ComposeResult:
        """Compose the chat input layout."""
        with Horizontal(classes="input-row"):
            yield Static(">", classes="input-prompt", id="prompt")
            yield ChatTextArea(id="chat-input")

        yield CompletionPopup(id="completion-popup")

    def on_mount(self) -> None:
        """Initialize components after mount."""
        self._text_area = self.query_one("#chat-input", ChatTextArea)
        self._popup = self.query_one("#completion-popup", CompletionPopup)

        # Controller order matters: slash commands win over file completion.
        self._completion_manager = MultiCompletionManager(
            [
                SlashCommandController(SLASH_COMMANDS, self),
                FuzzyFileController(self, cwd=self._cwd),
            ]
        )

        self._text_area.focus()

    def on_text_area_changed(self, event: TextArea.Changed) -> None:
        """Detect input mode and update completions."""
        text = event.text_area.text

        # Update mode based on first character
        if text.startswith("!"):
            self.mode = "bash"
        elif text.startswith("/"):
            self.mode = "command"
        else:
            self.mode = "normal"

        # Skip completion during history navigation to avoid popup flashing.
        # NOTE(review): reaches into ChatTextArea's private flag; the two
        # widgets are coupled and must be maintained together.
        if self._text_area and self._text_area._navigating_history:
            if self._completion_manager:
                self._completion_manager.reset()
            return

        # Update completion suggestions
        if self._completion_manager and self._text_area:
            cursor_offset = self._get_cursor_offset()
            self._completion_manager.on_text_changed(text, cursor_offset)

    def on_chat_text_area_submitted(self, event: ChatTextArea.Submitted) -> None:
        """Handle text submission: record history, re-emit, and clear input."""
        value = event.value
        if value:
            if self._completion_manager:
                self._completion_manager.reset()

            self._history.add(value)
            self.post_message(self.Submitted(value, self.mode))
            if self._text_area:
                self._text_area.clear_text()
            self.mode = "normal"

    def on_chat_text_area_history_previous(self, event: ChatTextArea.HistoryPrevious) -> None:
        """Handle history previous request."""
        entry = self._history.get_previous(event.current_text)
        if entry is not None and self._text_area:
            self._text_area.set_text_from_history(entry)

    def on_chat_text_area_history_next(
        self,
        event: ChatTextArea.HistoryNext,  # noqa: ARG002
    ) -> None:
        """Handle history next request."""
        entry = self._history.get_next()
        if entry is not None and self._text_area:
            self._text_area.set_text_from_history(entry)

    async def on_key(self, event: events.Key) -> None:
        """Handle key events for completion navigation."""
        if not self._completion_manager or not self._text_area:
            return

        text = self._text_area.text
        cursor = self._get_cursor_offset()

        result = self._completion_manager.on_key(event, text, cursor)

        match result:
            case CompletionResult.HANDLED:
                event.prevent_default()
                event.stop()
            case CompletionResult.SUBMIT:
                # A controller consumed the key and wants the text submitted.
                event.prevent_default()
                event.stop()
                value = self._text_area.text.strip()
                if value:
                    self._completion_manager.reset()
                    self._history.add(value)
                    self.post_message(self.Submitted(value, self.mode))
                    self._text_area.clear_text()
                    self.mode = "normal"
            case CompletionResult.IGNORED if event.key == "enter":
                # Handle Enter when completion is not active (bash/normal modes)
                value = self._text_area.text.strip()
                if value:
                    event.prevent_default()
                    event.stop()
                    self._history.add(value)
                    self.post_message(self.Submitted(value, self.mode))
                    self._text_area.clear_text()
                    self.mode = "normal"

    def _get_cursor_offset(self) -> int:
        """Get the cursor offset as a single integer.

        Converts the TextArea's (row, col) cursor location into a flat
        character offset into the full text (counting "\\n" separators),
        clamping both coordinates into range.
        """
        if not self._text_area:
            return 0

        text = self._text_area.text
        row, col = self._text_area.cursor_location

        if not text:
            return 0

        lines = text.split("\n")
        row = max(0, min(row, len(lines) - 1))
        col = max(0, col)

        # +1 per preceding line accounts for the newline separator.
        offset = sum(len(lines[i]) + 1 for i in range(row))
        return offset + min(col, len(lines[row]))

    def watch_mode(self, mode: str) -> None:
        """Post mode changed message when mode changes."""
        self.post_message(self.ModeChanged(mode))

    def focus_input(self) -> None:
        """Focus the input field."""
        if self._text_area:
            self._text_area.focus()

    @property
    def value(self) -> str:
        """Get the current input value."""
        if self._text_area:
            return self._text_area.text
        return ""

    @value.setter
    def value(self, val: str) -> None:
        """Set the input value."""
        if self._text_area:
            self._text_area.text = val

    @property
    def input_widget(self) -> ChatTextArea | None:
        """Get the underlying TextArea widget."""
        return self._text_area

    def set_disabled(self, *, disabled: bool) -> None:
        """Enable or disable the input widget."""
        if self._text_area:
            self._text_area.disabled = disabled
            if disabled:
                self._text_area.blur()
        if self._completion_manager:
            self._completion_manager.reset()

    def set_cursor_active(self, *, active: bool) -> None:
        """Set whether the cursor should be actively blinking.

        When active=False (e.g., agent is working), disables cursor blink
        so the cursor doesn't flash while waiting for a response.
        """
        if self._text_area:
            self._text_area.set_app_focus(has_focus=active)

    # =========================================================================
    # CompletionView protocol implementation
    # =========================================================================

    def render_completion_suggestions(
        self, suggestions: list[tuple[str, str]], selected_index: int
    ) -> None:
        """Render completion suggestions in the popup."""
        if self._popup:
            self._popup.update_suggestions(suggestions, selected_index)
        # Tell TextArea that completion is active so it yields navigation keys
        if self._text_area:
            self._text_area.set_completion_active(active=bool(suggestions))

    def clear_completion_suggestions(self) -> None:
        """Clear/hide the completion popup."""
        if self._popup:
            self._popup.hide()
        # Tell TextArea that completion is no longer active
        if self._text_area:
            self._text_area.set_completion_active(active=False)

    def replace_completion_range(self, start: int, end: int, replacement: str) -> None:
        """Replace text in the input field.

        Replaces characters [start, end) with *replacement* (appending a
        trailing space unless the replacement is a directory path or a space
        already follows), then moves the cursor just past the insertion.
        """
        if not self._text_area:
            return

        text = self._text_area.text
        start = max(0, min(start, len(text)))
        end = max(start, min(end, len(text)))

        prefix = text[:start]
        suffix = text[end:]

        # Add space after completion unless it's a directory path
        if replacement.endswith("/"):
            insertion = replacement
        else:
            insertion = replacement + " " if not suffix.startswith(" ") else replacement

        new_text = f"{prefix}{insertion}{suffix}"
        self._text_area.text = new_text

        # Calculate new cursor position and move cursor
        new_offset = start + len(insertion)
        lines = new_text.split("\n")
        remaining = new_offset
        for row, line in enumerate(lines):
            if remaining <= len(line):
                self._text_area.move_cursor((row, remaining))
                break
            remaining -= len(line) + 1
|
||||
@@ -0,0 +1,194 @@
|
||||
"""Enhanced diff widget for displaying unified diffs."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from textual.containers import Vertical
|
||||
from textual.widgets import Static
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from textual.app import ComposeResult
|
||||
|
||||
|
||||
def _escape_markup(text: str) -> str:
|
||||
"""Escape Rich markup characters in text.
|
||||
|
||||
Args:
|
||||
text: Text that may contain Rich markup
|
||||
|
||||
Returns:
|
||||
Escaped text safe for Rich rendering
|
||||
"""
|
||||
# Escape brackets that could be interpreted as markup
|
||||
return text.replace("[", r"\[").replace("]", r"\]")
|
||||
|
||||
|
||||
def format_diff_textual(diff: str, max_lines: int | None = 100) -> str:
    """Format a unified diff with line numbers and colors.

    File headers (---/+++) are dropped, hunk headers only seed the line
    counters, and added/removed/context lines are rendered with a colored
    gutter bar plus the matching old/new line number.

    Args:
        diff: Unified diff string
        max_lines: Maximum number of diff lines to show (None for unlimited)

    Returns:
        Rich-formatted diff string with line numbers
    """
    if not diff:
        return "[dim]No changes detected[/dim]"

    rows = diff.splitlines()
    hunk_re = re.compile(r"@@ -(\d+)(?:,\d+)? \+(\d+)")

    # Summary counts, excluding the ---/+++ file-header lines.
    added = sum(1 for r in rows if r.startswith("+") and not r.startswith("+++"))
    removed = sum(1 for r in rows if r.startswith("-") and not r.startswith("---"))

    # Gutter width: wide enough for the largest hunk start plus diff length.
    top = 0
    for row in rows:
        if m := hunk_re.match(row):
            top = max(top, int(m.group(1)), int(m.group(2)))
    width = max(3, len(str(top + len(rows))))

    out: list[str] = []
    header: list[str] = []
    if added:
        header.append(f"[green]+{added}[/green]")
    if removed:
        header.append(f"[red]-{removed}[/red]")
    if header:
        out.append(" ".join(header))
        out.append("")  # Blank line between the stats header and the body.

    old_num = new_num = 0
    shown = 0

    for row in rows:
        if max_lines and shown >= max_lines:
            out.append(f"\n[dim]... ({len(rows) - shown} more lines)[/dim]")
            break

        # File headers carry no content worth rendering.
        if row.startswith(("---", "+++")):
            continue

        # Hunk headers only reset the counters; they are never displayed.
        if m := hunk_re.match(row):
            old_num, new_num = int(m.group(1)), int(m.group(2))
            continue

        body = _escape_markup(row[1:] if row else "")

        if row.startswith("-"):
            # Deletion: red gutter bar, subtle red background.
            out.append(
                f"[red bold]▌[/red bold][dim]{old_num:>{width}}[/dim] "
                f"[on #2d1515]{body}[/on #2d1515]"
            )
            old_num += 1
            shown += 1
        elif row.startswith("+"):
            # Addition: green gutter bar, subtle green background.
            out.append(
                f"[green bold]▌[/green bold][dim]{new_num:>{width}}[/dim] "
                f"[on #152d15]{body}[/on #152d15]"
            )
            new_num += 1
            shown += 1
        elif row.startswith(" "):
            # Context line: dim gutter, advances both counters.
            out.append(f"[dim]│{old_num:>{width}}[/dim] {body}")
            old_num += 1
            new_num += 1
            shown += 1
        elif row.strip() == "...":
            # Truncation marker emitted by upstream tooling.
            out.append("[dim]...[/dim]")
            shown += 1

    return "\n".join(out)
|
||||
|
||||
|
||||
class EnhancedDiff(Vertical):
    """Widget for displaying a unified diff with syntax highlighting."""

    DEFAULT_CSS = """
    EnhancedDiff {
        height: auto;
        padding: 1;
        background: $surface-darken-1;
        border: round $primary;
    }

    EnhancedDiff .diff-title {
        color: $primary;
        text-style: bold;
        margin-bottom: 1;
    }

    EnhancedDiff .diff-content {
        height: auto;
    }

    EnhancedDiff .diff-stats {
        color: $text-muted;
        margin-top: 1;
    }
    """

    def __init__(
        self,
        diff: str,
        title: str = "Diff",
        max_lines: int | None = 100,
        **kwargs: Any,
    ) -> None:
        """Initialize the diff widget.

        Args:
            diff: Unified diff string
            title: Title to display above the diff
            max_lines: Maximum number of diff lines to show
            **kwargs: Additional arguments passed to parent
        """
        super().__init__(**kwargs)
        self._diff = diff
        self._title = title
        self._max_lines = max_lines
        self._stats = self._compute_stats()

    def _compute_stats(self) -> tuple[int, int]:
        """Count added and removed lines, ignoring the +++/--- file headers.

        Returns:
            Tuple of (additions, deletions)
        """
        rows = self._diff.splitlines()
        additions = sum(
            1 for r in rows if r.startswith("+") and not r.startswith("+++")
        )
        deletions = sum(
            1 for r in rows if r.startswith("-") and not r.startswith("---")
        )
        return additions, deletions

    def compose(self) -> ComposeResult:
        """Yield the title, the formatted diff body, and an optional stats row."""
        yield Static(f"[bold cyan]═══ {self._title} ═══[/bold cyan]", classes="diff-title")
        yield Static(format_diff_textual(self._diff, self._max_lines), classes="diff-content")

        additions, deletions = self._stats
        if additions or deletions:
            parts: list[str] = []
            if additions:
                parts.append(f"[green]+{additions}[/green]")
            if deletions:
                parts.append(f"[red]-{deletions}[/red]")
            yield Static(" ".join(parts), classes="diff-stats")
|
||||
@@ -0,0 +1,152 @@
|
||||
"""Command history manager for input persistence."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from pathlib import Path # noqa: TC003 - used at runtime in type hints
|
||||
|
||||
|
||||
class HistoryManager:
|
||||
"""Manages command history with file persistence.
|
||||
|
||||
Uses append-only writes for concurrent safety. Multiple agents can
|
||||
safely write to the same history file without corruption.
|
||||
"""
|
||||
|
||||
    def __init__(self, history_file: Path, max_entries: int = 100) -> None:
        """Initialize the history manager.

        Args:
            history_file: Path to the JSON-lines history file
            max_entries: Maximum number of entries to keep
        """
        self.history_file = history_file
        self.max_entries = max_entries
        # Entries in chronological order (oldest first).
        self._entries: list[str] = []
        # -1 means "not navigating"; otherwise an index into self._entries.
        self._current_index: int = -1
        # Text the user had typed before navigation began, saved for restore.
        self._temp_input: str = ""
        self._load_history()
|
||||
|
||||
def _load_history(self) -> None:
|
||||
"""Load history from file."""
|
||||
if not self.history_file.exists():
|
||||
return
|
||||
|
||||
try:
|
||||
with self.history_file.open("r", encoding="utf-8") as f:
|
||||
entries = []
|
||||
for raw_line in f:
|
||||
line = raw_line.rstrip("\n\r")
|
||||
if not line:
|
||||
continue
|
||||
try:
|
||||
entry = json.loads(line)
|
||||
except json.JSONDecodeError:
|
||||
entry = line
|
||||
entries.append(entry if isinstance(entry, str) else str(entry))
|
||||
self._entries = entries[-self.max_entries :]
|
||||
except (OSError, UnicodeDecodeError):
|
||||
self._entries = []
|
||||
|
||||
def _append_to_file(self, text: str) -> None:
|
||||
"""Append a single entry to history file (concurrent-safe)."""
|
||||
try:
|
||||
self.history_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
with self.history_file.open("a", encoding="utf-8") as f:
|
||||
f.write(json.dumps(text) + "\n")
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def _compact_history(self) -> None:
|
||||
"""Rewrite history file to remove old entries.
|
||||
|
||||
Only called when entries exceed 2x max_entries to minimize rewrites.
|
||||
"""
|
||||
try:
|
||||
self.history_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
with self.history_file.open("w", encoding="utf-8") as f:
|
||||
for entry in self._entries:
|
||||
f.write(json.dumps(entry) + "\n")
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def add(self, text: str) -> None:
    """Record a command in history.

    Empty input, slash commands, and immediate repeats of the last
    entry are ignored. Entries are appended to disk right away; the
    file is only compacted once it grows past twice ``max_entries``.

    Args:
        text: The command text to add
    """
    candidate = text.strip()

    # Ignore blank input and slash commands.
    if not candidate or candidate.startswith("/"):
        return

    # Ignore an exact repeat of the newest entry.
    if self._entries and self._entries[-1] == candidate:
        return

    self._entries.append(candidate)

    # Cheap, concurrency-friendly disk write.
    self._append_to_file(candidate)

    # Rare full rewrite: only once the in-memory list doubles the cap.
    if len(self._entries) > self.max_entries * 2:
        del self._entries[: -self.max_entries]
        self._compact_history()

    self.reset_navigation()
|
||||
|
||||
def get_previous(self, current_input: str, prefix: str = "") -> str | None:
    """Step backwards through history.

    Args:
        current_input: Current input text (saved on first navigation)
        prefix: Optional prefix to filter entries

    Returns:
        Previous matching entry or None
    """
    if not self._entries:
        return None

    # First backwards step: remember what the user had typed so that
    # get_next() can restore it when navigation runs off the end.
    if self._current_index == -1:
        self._temp_input = current_input
        self._current_index = len(self._entries)

    for idx in range(self._current_index - 1, -1, -1):
        if self._entries[idx].startswith(prefix):
            self._current_index = idx
            return self._entries[idx]

    return None
|
||||
|
||||
def get_next(self, prefix: str = "") -> str | None:
    """Step forwards through history.

    Args:
        prefix: Optional prefix to filter entries

    Returns:
        Next matching entry, the original input once the end is
        reached, or None when not currently navigating.
    """
    if self._current_index == -1:
        return None

    for idx in range(self._current_index + 1, len(self._entries)):
        entry = self._entries[idx]
        if entry.startswith(prefix):
            self._current_index = idx
            return entry

    # Ran past the newest entry: hand back whatever the user had typed
    # before navigation began and leave navigation mode.
    original = self._temp_input
    self.reset_navigation()
    return original
|
||||
|
||||
def reset_navigation(self) -> None:
    """Leave history-navigation mode and forget the saved input."""
    self._temp_input = ""
    self._current_index = -1
|
||||
@@ -0,0 +1,161 @@
|
||||
"""Loading widget with animated spinner for agent activity."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from time import time
|
||||
from typing import TYPE_CHECKING, ClassVar
|
||||
|
||||
from textual.containers import Horizontal
|
||||
from textual.widgets import Static
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from textual.app import ComposeResult
|
||||
|
||||
|
||||
class BrailleSpinner:
    """Cycles through the ten braille spinner glyphs."""

    FRAMES: ClassVar[tuple[str, ...]] = (
        "⠋",
        "⠙",
        "⠹",
        "⠸",
        "⠼",
        "⠴",
        "⠦",
        "⠧",
        "⠇",
        "⠏",
    )

    def __init__(self) -> None:
        """Start at the first frame."""
        self._position = 0

    def next_frame(self) -> str:
        """Return the current frame, then advance (wrapping around)."""
        glyph = self.FRAMES[self._position]
        self._position = (self._position + 1) % len(self.FRAMES)
        return glyph

    def current_frame(self) -> str:
        """Return the current frame without advancing."""
        return self.FRAMES[self._position]
|
||||
|
||||
|
||||
class LoadingWidget(Static):
    """Animated loading indicator with status text and elapsed time.

    Displays: ⠋ Thinking... (3s, esc to interrupt)
    """

    DEFAULT_CSS = """
    LoadingWidget {
        height: auto;
        padding: 0 1;
    }

    LoadingWidget .loading-container {
        height: auto;
        width: 100%;
    }

    LoadingWidget .loading-spinner {
        width: auto;
        color: $warning;
    }

    LoadingWidget .loading-status {
        width: auto;
        color: $warning;
    }

    LoadingWidget .loading-hint {
        width: auto;
        color: $text-muted;
        margin-left: 1;
    }
    """

    def __init__(self, status: str = "Thinking") -> None:
        """Initialize loading widget.

        Args:
            status: Initial status text to display
        """
        super().__init__()
        self._status = status
        self._spinner = BrailleSpinner()
        # Wall-clock start time; set in on_mount(), None until mounted.
        self._start_time: float | None = None
        # Child widgets are created in compose(); None before mounting.
        self._spinner_widget: Static | None = None
        self._status_widget: Static | None = None
        self._hint_widget: Static | None = None
        self._paused = False
        # Elapsed seconds captured at the moment pause() was called.
        self._paused_elapsed: int = 0

    def compose(self) -> ComposeResult:
        """Compose the loading widget layout: spinner, status, hint."""
        with Horizontal(classes="loading-container"):
            self._spinner_widget = Static(self._spinner.current_frame(), classes="loading-spinner")
            yield self._spinner_widget

            self._status_widget = Static(f" {self._status}... ", classes="loading-status")
            yield self._status_widget

            self._hint_widget = Static("(0s, esc to interrupt)", classes="loading-hint")
            yield self._hint_widget

    def on_mount(self) -> None:
        """Start animation on mount."""
        self._start_time = time()
        # 10 fps is plenty for a smooth spinner without busy re-rendering.
        self.set_interval(0.1, self._update_animation)

    def _update_animation(self) -> None:
        """Update spinner and elapsed time; no-op while paused."""
        if self._paused:
            return

        if self._spinner_widget:
            frame = self._spinner.next_frame()
            self._spinner_widget.update(f"[#FFD800]{frame}[/]")

        if self._hint_widget and self._start_time is not None:
            elapsed = int(time() - self._start_time)
            self._hint_widget.update(f"({elapsed}s, esc to interrupt)")

    def set_status(self, status: str) -> None:
        """Update the status text.

        Args:
            status: New status text
        """
        self._status = status
        if self._status_widget:
            self._status_widget.update(f" {self._status}... ")

    def pause(self, status: str = "Awaiting decision") -> None:
        """Pause the animation and update status.

        Args:
            status: Status to show while paused
        """
        self._paused = True
        if self._start_time is not None:
            self._paused_elapsed = int(time() - self._start_time)
        self._status = status
        if self._status_widget:
            self._status_widget.update(f" {status}... ")
        if self._hint_widget:
            self._hint_widget.update(f"(paused at {self._paused_elapsed}s)")
        if self._spinner_widget:
            self._spinner_widget.update("[dim]⏸[/dim]")

    def resume(self) -> None:
        """Resume the animation.

        NOTE(review): _start_time is not adjusted here, so after resuming
        the elapsed counter includes the time spent paused — confirm this
        is the intended behavior.
        """
        self._paused = False
        self._status = "Thinking"
        if self._status_widget:
            self._status_widget.update(f" {self._status}... ")

    def stop(self) -> None:
        """Stop the animation (widget will be removed by caller)."""
|
||||
@@ -0,0 +1,507 @@
|
||||
"""Message widgets for deepagents-cli."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from textual.containers import Vertical
|
||||
from textual.css.query import NoMatches
|
||||
from textual.widgets import Markdown, Static
|
||||
from textual.widgets._markdown import MarkdownStream
|
||||
|
||||
from deepagents_cli.ui import format_tool_display
|
||||
from deepagents_cli.widgets.diff import format_diff_textual
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from textual.app import ComposeResult
|
||||
|
||||
# Maximum number of tool arguments to display inline
|
||||
_MAX_INLINE_ARGS = 3
|
||||
|
||||
|
||||
class UserMessage(Static):
    """Widget displaying a user message."""

    DEFAULT_CSS = """
    UserMessage {
        height: auto;
        padding: 0 1;
        margin: 1 0;
        background: $surface;
        border-left: thick $primary;
    }

    UserMessage .user-prefix {
        color: $primary;
        text-style: bold;
    }

    UserMessage .user-content {
        margin-left: 1;
    }
    """

    def __init__(self, content: str, **kwargs: Any) -> None:
        """Store the message text for rendering in compose().

        Args:
            content: The message content
            **kwargs: Additional arguments passed to parent
        """
        super().__init__(**kwargs)
        self._content = content

    def compose(self) -> ComposeResult:
        """Render the message behind a bold cyan ``>`` prompt."""
        yield Static(f"[bold cyan]>[/bold cyan] {self._content}")
|
||||
|
||||
|
||||
class AssistantMessage(Vertical):
    """Widget displaying an assistant message with markdown support.

    Uses MarkdownStream for smoother streaming instead of re-rendering
    the full content on each update.
    """

    DEFAULT_CSS = """
    AssistantMessage {
        height: auto;
        padding: 0 1;
        margin: 1 0;
    }

    AssistantMessage Markdown {
        padding: 0;
        margin: 0;
    }
    """

    def __init__(self, content: str = "", **kwargs: Any) -> None:
        """Initialize an assistant message.

        Args:
            content: Initial markdown content
            **kwargs: Additional arguments passed to parent
        """
        super().__init__(**kwargs)
        # Full accumulated markdown text (source of truth for set_content).
        self._content = content
        # Cached child Markdown widget; resolved lazily after mount.
        self._markdown: Markdown | None = None
        # Active streaming writer; created on first write, None when idle.
        self._stream: MarkdownStream | None = None

    def compose(self) -> ComposeResult:
        """Compose the assistant message layout."""
        yield Markdown("", id="assistant-content")

    def on_mount(self) -> None:
        """Store reference to markdown widget."""
        self._markdown = self.query_one("#assistant-content", Markdown)

    def _get_markdown(self) -> Markdown:
        """Get the markdown widget, querying if not cached."""
        if self._markdown is None:
            self._markdown = self.query_one("#assistant-content", Markdown)
        return self._markdown

    def _ensure_stream(self) -> MarkdownStream:
        """Ensure the markdown stream is initialized (lazily, once)."""
        if self._stream is None:
            self._stream = Markdown.get_stream(self._get_markdown())
        return self._stream

    async def append_content(self, text: str) -> None:
        """Append content to the message (for streaming).

        Uses MarkdownStream for smoother rendering instead of re-rendering
        the full content on each chunk.

        Args:
            text: Text to append
        """
        if not text:
            return
        self._content += text
        stream = self._ensure_stream()
        await stream.write(text)

    async def write_initial_content(self) -> None:
        """Write initial content if provided at construction time."""
        if self._content:
            stream = self._ensure_stream()
            await stream.write(self._content)

    async def stop_stream(self) -> None:
        """Stop the streaming and finalize the content."""
        if self._stream is not None:
            await self._stream.stop()
            # Drop the stream so a later append lazily creates a new one.
            self._stream = None

    async def set_content(self, content: str) -> None:
        """Set the full message content.

        This stops any active stream and sets content directly.

        Args:
            content: The markdown content to display
        """
        await self.stop_stream()
        self._content = content
        if self._markdown:
            await self._markdown.update(content)
|
||||
|
||||
|
||||
class ToolCallMessage(Vertical):
    """Widget displaying a tool call with collapsible output.

    Tool outputs are shown as a 3-line preview by default.
    Press Ctrl+O to expand/collapse the full output.
    """

    DEFAULT_CSS = """
    ToolCallMessage {
        height: auto;
        padding: 0 1;
        margin: 1 0;
        background: $surface;
        border-left: thick $secondary;
    }

    ToolCallMessage .tool-header {
        color: $secondary;
        text-style: bold;
    }

    ToolCallMessage .tool-args {
        color: $text-muted;
        margin-left: 2;
    }

    ToolCallMessage .tool-status {
        margin-left: 2;
    }

    ToolCallMessage .tool-status.pending {
        color: $warning;
    }

    ToolCallMessage .tool-status.success {
        color: $success;
    }

    ToolCallMessage .tool-status.error {
        color: $error;
    }

    ToolCallMessage .tool-status.rejected {
        color: $warning;
    }

    ToolCallMessage .tool-output {
        margin-left: 2;
        margin-top: 1;
        padding: 1;
        background: $surface-darken-1;
        color: $text-muted;
        max-height: 20;
        overflow-y: auto;
    }

    ToolCallMessage .tool-output-preview {
        margin-left: 2;
        color: $text-muted;
    }

    ToolCallMessage .tool-output-hint {
        margin-left: 2;
        color: $primary;
        text-style: italic;
    }
    """

    # Max lines/chars to show in preview mode
    _PREVIEW_LINES = 3
    _PREVIEW_CHARS = 200

    def __init__(
        self,
        tool_name: str,
        args: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize a tool call message.

        Args:
            tool_name: Name of the tool being called
            args: Tool arguments (optional)
            **kwargs: Additional arguments passed to parent
        """
        super().__init__(**kwargs)
        self._tool_name = tool_name
        self._args = args or {}
        # One of "pending", "success", "error", "rejected".
        self._status = "pending"
        # Raw tool result/error text; drives the preview/full display.
        self._output: str = ""
        # Whether the full output (vs. 3-line preview) is shown.
        self._expanded: bool = False

    def compose(self) -> ComposeResult:
        """Compose the tool call message layout."""
        tool_label = format_tool_display(self._tool_name, self._args)
        yield Static(
            f"[bold yellow]Tool:[/bold yellow] {tool_label}",
            classes="tool-header",
        )
        args = self._filtered_args()
        if args:
            # Show at most _MAX_INLINE_ARGS arguments inline, with a
            # ", ..." tail when some were elided.
            args_str = ", ".join(f"{k}={v!r}" for k, v in list(args.items())[:_MAX_INLINE_ARGS])
            if len(args) > _MAX_INLINE_ARGS:
                args_str += ", ..."
            yield Static(f"({args_str})", classes="tool-args")
        yield Static(
            "[yellow]Pending...[/yellow]",
            classes="tool-status pending",
            id="status",
        )
        # Output area - hidden initially, shown when output is set
        yield Static("", classes="tool-output-preview", id="output-preview")
        yield Static("", classes="tool-output-hint", id="output-hint")
        yield Static("", classes="tool-output", id="output-full")

    def on_mount(self) -> None:
        """Hide output areas initially."""
        try:
            self.query_one("#output-preview").display = False
            self.query_one("#output-hint").display = False
            self.query_one("#output-full").display = False
        except NoMatches:
            pass

    def set_success(self, result: str = "") -> None:
        """Mark the tool call as successful.

        Args:
            result: Tool output/result to display
        """
        self._status = "success"
        self._output = result
        try:
            status = self.query_one("#status", Static)
            status.remove_class("pending", "error")
            status.add_class("success")
            status.update("[green]✓ Success[/green]")
        except NoMatches:
            pass
        self._update_output_display()

    def set_error(self, error: str) -> None:
        """Mark the tool call as failed.

        Args:
            error: Error message
        """
        self._status = "error"
        self._output = error
        try:
            status = self.query_one("#status", Static)
            status.remove_class("pending", "success")
            status.add_class("error")
            status.update("[red]✗ Error[/red]")
        except NoMatches:
            pass
        # Always show full error - errors should be visible
        self._expanded = True
        self._update_output_display()

    def set_rejected(self) -> None:
        """Mark the tool call as rejected by user."""
        self._status = "rejected"
        try:
            status = self.query_one("#status", Static)
            status.remove_class("pending", "success", "error")
            status.add_class("rejected")
            status.update("[yellow]✗ Rejected[/yellow]")
        except NoMatches:
            pass

    def toggle_output(self) -> None:
        """Toggle between preview and full output display."""
        if not self._output:
            return
        self._expanded = not self._expanded
        self._update_output_display()

    def _update_output_display(self) -> None:
        """Update the output display based on expanded state.

        Exactly one of three states is shown: the full output, a
        truncated preview plus an expand hint, or the untruncated
        output when it already fits in the preview budget.
        """
        if not self._output:
            return

        try:
            preview = self.query_one("#output-preview", Static)
            hint = self.query_one("#output-hint", Static)
            full = self.query_one("#output-full", Static)

            output_stripped = self._output.strip()
            lines = output_stripped.split("\n")
            total_lines = len(lines)
            total_chars = len(output_stripped)

            # Truncate if too many lines OR too many characters
            needs_truncation = (
                total_lines > self._PREVIEW_LINES or total_chars > self._PREVIEW_CHARS
            )

            if self._expanded:
                # Show full output
                preview.display = False
                hint.display = False
                full.update(self._output)
                full.display = True
            else:
                # Show preview
                full.display = False
                if needs_truncation:
                    # Truncate by lines first, then by chars
                    if total_lines > self._PREVIEW_LINES:
                        preview_text = "\n".join(lines[: self._PREVIEW_LINES])
                    else:
                        preview_text = output_stripped

                    # Also truncate by chars if still too long
                    if len(preview_text) > self._PREVIEW_CHARS:
                        preview_text = preview_text[: self._PREVIEW_CHARS] + "..."

                    preview.update(preview_text)
                    preview.display = True

                    # Show expand hint
                    hint.update("[dim]... (Ctrl+O to expand)[/dim]")
                    hint.display = True
                elif output_stripped:
                    # Output fits in preview, just show it
                    preview.update(output_stripped)
                    preview.display = True
                    hint.display = False
                else:
                    preview.display = False
                    hint.display = False
        except NoMatches:
            pass

    @property
    def has_output(self) -> bool:
        """Check if this tool message has output to display."""
        return bool(self._output)

    def _filtered_args(self) -> dict[str, Any]:
        """Filter large tool args for display.

        For write_file/edit_file only the small, path-like arguments are
        kept; file contents would flood the inline argument line.
        """
        if self._tool_name not in {"write_file", "edit_file"}:
            return self._args

        filtered: dict[str, Any] = {}
        for key in ("file_path", "path", "replace_all"):
            if key in self._args:
                filtered[key] = self._args[key]
        return filtered
|
||||
|
||||
|
||||
class DiffMessage(Static):
    """Widget displaying a diff with syntax highlighting."""

    DEFAULT_CSS = """
    DiffMessage {
        height: auto;
        padding: 1;
        margin: 1 0;
        background: $surface;
        border: solid $primary;
    }

    DiffMessage .diff-header {
        text-style: bold;
        margin-bottom: 1;
    }

    DiffMessage .diff-add {
        color: #10b981;
        background: #10b98120;
    }

    DiffMessage .diff-remove {
        color: #ef4444;
        background: #ef444420;
    }

    DiffMessage .diff-context {
        color: $text-muted;
    }

    DiffMessage .diff-hunk {
        color: $secondary;
        text-style: bold;
    }
    """

    def __init__(self, diff_content: str, file_path: str = "", **kwargs: Any) -> None:
        """Initialize a diff message.

        Args:
            diff_content: The unified diff content
            file_path: Path to the file being modified
            **kwargs: Additional arguments passed to parent
        """
        super().__init__(**kwargs)
        self._diff_content = diff_content
        self._file_path = file_path

    def compose(self) -> ComposeResult:
        """Compose the diff message layout."""
        # Optional header naming the modified file.
        if self._file_path:
            yield Static(f"[bold]File: {self._file_path}[/bold]", classes="diff-header")

        # Render the diff with enhanced formatting; long diffs are
        # capped at 100 lines by the formatter.
        rendered = format_diff_textual(self._diff_content, max_lines=100)
        yield Static(rendered)
|
||||
|
||||
|
||||
class ErrorMessage(Static):
    """Widget displaying an error message."""

    DEFAULT_CSS = """
    ErrorMessage {
        height: auto;
        padding: 1;
        margin: 1 0;
        background: #7f1d1d;
        color: white;
        border-left: thick $error;
    }
    """

    def __init__(self, error: str, **kwargs: Any) -> None:
        """Render *error* behind a bold red "Error:" prefix.

        Args:
            error: The error message
            **kwargs: Additional arguments passed to parent
        """
        rendered = f"[bold red]Error:[/bold red] {error}"
        super().__init__(rendered, **kwargs)
|
||||
|
||||
|
||||
class SystemMessage(Static):
    """Widget displaying a system message."""

    DEFAULT_CSS = """
    SystemMessage {
        height: auto;
        padding: 0 1;
        margin: 1 0;
        color: $text-muted;
        text-style: italic;
    }
    """

    def __init__(self, message: str, **kwargs: Any) -> None:
        """Render *message* dimmed.

        Args:
            message: The system message
            **kwargs: Additional arguments passed to parent
        """
        rendered = f"[dim]{message}[/dim]"
        super().__init__(rendered, **kwargs)
|
||||
@@ -0,0 +1,233 @@
|
||||
"""Status bar widget for deepagents-cli."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from textual.containers import Horizontal
|
||||
from textual.css.query import NoMatches
|
||||
from textual.reactive import reactive
|
||||
from textual.widgets import Static
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from textual.app import ComposeResult
|
||||
|
||||
|
||||
class StatusBar(Horizontal):
    """Status bar showing mode, auto-approve status, and working directory."""

    DEFAULT_CSS = """
    StatusBar {
        height: 1;
        dock: bottom;
        background: $surface;
        padding: 0 1;
    }

    StatusBar .status-mode {
        width: auto;
        padding: 0 1;
    }

    StatusBar .status-mode.normal {
        display: none;
    }

    StatusBar .status-mode.bash {
        background: #ff1493;
        color: white;
        text-style: bold;
    }

    StatusBar .status-mode.command {
        background: #8b5cf6;
        color: white;
    }

    StatusBar .status-auto-approve {
        width: auto;
        padding: 0 1;
    }

    StatusBar .status-auto-approve.on {
        background: #10b981;
        color: black;
    }

    StatusBar .status-auto-approve.off {
        background: #f59e0b;
        color: black;
    }

    StatusBar .status-message {
        width: auto;
        padding: 0 1;
        color: $text-muted;
    }

    StatusBar .status-message.thinking {
        color: $warning;
    }

    StatusBar .status-cwd {
        width: 1fr;
        text-align: right;
        color: $text-muted;
    }

    StatusBar .status-tokens {
        width: auto;
        padding: 0 1;
        color: $text-muted;
    }
    """

    # Reactive state; init=False defers the watchers until values are
    # first assigned (see on_mount / the public setters below).
    mode: reactive[str] = reactive("normal", init=False)
    status_message: reactive[str] = reactive("", init=False)
    auto_approve: reactive[bool] = reactive(default=False, init=False)
    cwd: reactive[str] = reactive("", init=False)
    tokens: reactive[int] = reactive(0, init=False)

    def __init__(self, cwd: str | Path | None = None, **kwargs: Any) -> None:
        """Initialize the status bar.

        Args:
            cwd: Current working directory to display
            **kwargs: Additional arguments passed to parent
        """
        super().__init__(**kwargs)
        # Store initial cwd - will be used in compose()
        self._initial_cwd = str(cwd) if cwd else str(Path.cwd())

    def compose(self) -> ComposeResult:
        """Compose the status bar layout."""
        yield Static("", classes="status-mode normal", id="mode-indicator")
        yield Static(
            "manual | shift+tab to cycle",
            classes="status-auto-approve off",
            id="auto-approve-indicator",
        )
        yield Static("", classes="status-message", id="status-message")
        yield Static("", classes="status-tokens", id="tokens-display")
        # CWD shown in welcome banner, not pinned in status bar

    def on_mount(self) -> None:
        """Set reactive values after mount to trigger watchers safely."""
        self.cwd = self._initial_cwd

    def watch_mode(self, mode: str) -> None:
        """Update mode indicator when mode changes."""
        try:
            indicator = self.query_one("#mode-indicator", Static)
        except NoMatches:
            return
        indicator.remove_class("normal", "bash", "command")

        if mode == "bash":
            indicator.update("BASH")
            indicator.add_class("bash")
        elif mode == "command":
            indicator.update("CMD")
            indicator.add_class("command")
        else:
            # "normal" (and any unknown mode) hides the indicator via CSS.
            indicator.update("")
            indicator.add_class("normal")

    def watch_auto_approve(self, new_value: bool) -> None:  # noqa: FBT001
        """Update auto-approve indicator when state changes."""
        try:
            indicator = self.query_one("#auto-approve-indicator", Static)
        except NoMatches:
            return
        indicator.remove_class("on", "off")

        if new_value:
            indicator.update("auto | shift+tab to cycle")
            indicator.add_class("on")
        else:
            indicator.update("manual | shift+tab to cycle")
            indicator.add_class("off")

    def watch_cwd(self, new_value: str) -> None:
        """Update cwd display when it changes.

        NOTE(review): compose() never yields a "#cwd-display" widget (the
        cwd is shown in the welcome banner instead), so this currently
        always hits NoMatches and returns — confirm whether the widget or
        this watcher should be removed.
        """
        try:
            display = self.query_one("#cwd-display", Static)
        except NoMatches:
            return
        display.update(self._format_cwd(new_value))

    def watch_status_message(self, new_value: str) -> None:
        """Update status message display."""
        try:
            msg_widget = self.query_one("#status-message", Static)
        except NoMatches:
            return

        msg_widget.remove_class("thinking")
        if new_value:
            msg_widget.update(new_value)
            # Highlight busy states in the warning color.
            if "thinking" in new_value.lower() or "executing" in new_value.lower():
                msg_widget.add_class("thinking")
        else:
            msg_widget.update("")

    def _format_cwd(self, cwd_path: str = "") -> str:
        """Format the current working directory for display."""
        path = Path(cwd_path or self.cwd or self._initial_cwd)
        try:
            # Try to use ~ for home directory
            home = Path.home()
            if path.is_relative_to(home):
                return "~/" + str(path.relative_to(home))
        except (ValueError, RuntimeError):
            # Path.home() can raise RuntimeError when HOME is unset.
            pass
        return str(path)

    def set_mode(self, mode: str) -> None:
        """Set the current input mode.

        Args:
            mode: One of "normal", "bash", or "command"
        """
        self.mode = mode

    def set_auto_approve(self, *, enabled: bool) -> None:
        """Set the auto-approve state.

        Args:
            enabled: Whether auto-approve is enabled
        """
        self.auto_approve = enabled

    def set_status_message(self, message: str) -> None:
        """Set the status message.

        Args:
            message: Status message to display (empty string to clear)
        """
        self.status_message = message

    def watch_tokens(self, new_value: int) -> None:
        """Update token display when count changes."""
        try:
            display = self.query_one("#tokens-display", Static)
        except NoMatches:
            return

        if new_value > 0:
            # Format with K suffix for thousands
            if new_value >= 1000:
                display.update(f"{new_value / 1000:.1f}K tokens")
            else:
                display.update(f"{new_value} tokens")
        else:
            display.update("")

    def set_tokens(self, count: int) -> None:
        """Set the token count.

        Args:
            count: Current context token count
        """
        self.tokens = count
|
||||
@@ -0,0 +1,135 @@
|
||||
"""Tool renderers for approval widgets - registry pattern."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import difflib
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from deepagents_cli.widgets.tool_widgets import (
|
||||
BashApprovalWidget,
|
||||
EditFileApprovalWidget,
|
||||
GenericApprovalWidget,
|
||||
WriteFileApprovalWidget,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from deepagents_cli.widgets.tool_widgets import ToolApprovalWidget
|
||||
|
||||
|
||||
class ToolRenderer:
    """Fallback renderer used when a tool has no specialised widget."""

    def get_approval_widget(
        self, tool_args: dict[str, Any]
    ) -> tuple[type[ToolApprovalWidget], dict[str, Any]]:
        """Return the approval widget class and its display data.

        Args:
            tool_args: The tool arguments from action_request

        Returns:
            Tuple of (widget_class, data_dict)
        """
        # The generic widget simply lists the raw arguments.
        return GenericApprovalWidget, tool_args
|
||||
|
||||
|
||||
class WriteFileRenderer(ToolRenderer):
    """Renderer for write_file tool - shows full file content."""

    @staticmethod
    def _file_extension(file_path: str) -> str:
        """Return *file_path*'s extension for syntax highlighting.

        Uses the path's final suffix only, so dots in directory names
        (e.g. ``src.v2/Makefile``) or dotfiles no longer yield a bogus
        extension the way ``file_path.rsplit(".", 1)`` on the whole
        path did. Falls back to ``"text"`` when there is no suffix.
        """
        from pathlib import PurePath  # local import: keeps module deps unchanged

        suffix = PurePath(file_path).suffix
        return suffix[1:] if suffix else "text"

    def get_approval_widget(
        self, tool_args: dict[str, Any]
    ) -> tuple[type[ToolApprovalWidget], dict[str, Any]]:
        """Build the data payload for the write_file approval widget.

        Args:
            tool_args: The tool arguments from action_request

        Returns:
            Tuple of (widget_class, data_dict)
        """
        file_path = tool_args.get("file_path", "")
        content = tool_args.get("content", "")

        data = {
            "file_path": file_path,
            "content": content,
            "file_extension": self._file_extension(file_path),
        }
        return WriteFileApprovalWidget, data
|
||||
|
||||
|
||||
class EditFileRenderer(ToolRenderer):
    """Renderer for edit_file tool - shows unified diff."""

    def get_approval_widget(
        self, tool_args: dict[str, Any]
    ) -> tuple[type[ToolApprovalWidget], dict[str, Any]]:
        """Build diff data for the edit_file approval widget.

        Args:
            tool_args: The tool arguments from action_request

        Returns:
            Tuple of (widget_class, data_dict)
        """
        path = tool_args.get("file_path", "")
        before = tool_args.get("old_string", "")
        after = tool_args.get("new_string", "")

        payload = {
            "file_path": path,
            "diff_lines": self._generate_diff(before, after),
            "old_string": before,
            "new_string": after,
        }
        return EditFileApprovalWidget, payload

    def _generate_diff(self, old_string: str, new_string: str) -> list[str]:
        """Generate unified diff lines from old and new strings."""
        if not (old_string or new_string):
            return []

        before_lines = old_string.split("\n") if old_string else []
        after_lines = new_string.split("\n") if new_string else []

        hunks = list(
            difflib.unified_diff(
                before_lines,
                after_lines,
                fromfile="before",
                tofile="after",
                lineterm="",
                n=3,  # context lines around each change
            )
        )
        # Drop the "---"/"+++" file headers; keep hunks and content lines.
        return hunks[2:] if len(hunks) > 2 else hunks
|
||||
|
||||
|
||||
class BashRenderer(ToolRenderer):
    """Renderer for bash/shell tool - shows command."""

    def get_approval_widget(
        self, tool_args: dict[str, Any]
    ) -> tuple[type[ToolApprovalWidget], dict[str, Any]]:
        """Return the bash approval widget plus its command/description payload."""
        # Both fields default to "" when the tool call omitted them.
        payload = {key: tool_args.get(key, "") for key in ("command", "description")}
        return BashApprovalWidget, payload
|
||||
|
||||
|
||||
# Registry mapping tool names to renderers.
# Tools with no entry fall back to the base ToolRenderer (see get_renderer()).
_RENDERER_REGISTRY: dict[str, type[ToolRenderer]] = {
    "write_file": WriteFileRenderer,
    "edit_file": EditFileRenderer,
    "bash": BashRenderer,
    "shell": BashRenderer,  # "shell" is an alias for the same renderer as "bash"
}
|
||||
|
||||
|
||||
def get_renderer(tool_name: str) -> ToolRenderer:
    """Get the renderer for a tool by name.

    Args:
        tool_name: The name of the tool

    Returns:
        The appropriate ToolRenderer instance (a fresh instance per call;
        unknown tool names get the generic base ToolRenderer).
    """
    return _RENDERER_REGISTRY.get(tool_name, ToolRenderer)()
|
||||
@@ -0,0 +1,221 @@
|
||||
"""Tool-specific approval widgets for HITL display."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from textual.containers import Vertical
|
||||
from textual.widgets import Markdown, Static
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from textual.app import ComposeResult
|
||||
|
||||
# Constants for display limits
|
||||
_MAX_VALUE_LEN = 200
|
||||
_MAX_LINES = 30
|
||||
_MAX_DIFF_LINES = 50
|
||||
_MAX_PREVIEW_LINES = 20
|
||||
|
||||
|
||||
def _escape_markup(text: str) -> str:
|
||||
"""Escape Rich markup characters in text."""
|
||||
return text.replace("[", r"\[").replace("]", r"\]")
|
||||
|
||||
|
||||
class ToolApprovalWidget(Vertical):
    """Base class for tool approval widgets.

    Subclasses receive the tool-call payload in ``self.data`` and override
    ``compose`` to render a tool-specific preview.
    """

    def __init__(self, data: dict[str, Any]) -> None:
        """Initialize the tool approval widget with data."""
        # Shared CSS class so all approval widgets pick up common styling.
        super().__init__(classes="tool-approval-widget")
        self.data = data

    def compose(self) -> ComposeResult:
        """Default compose - override in subclasses."""
        # Fallback shown only when a subclass forgot to override compose().
        yield Static("Tool details not available", classes="approval-description")
|
||||
|
||||
|
||||
class GenericApprovalWidget(ToolApprovalWidget):
    """Generic approval widget for unknown tools."""

    def compose(self) -> ComposeResult:
        """Render every non-None argument as a truncated "key: value" line."""
        for key, value in self.data.items():
            if value is None:
                # Omit arguments the tool call did not supply.
                continue
            text = str(value)
            overflow = len(text) - _MAX_VALUE_LEN
            if overflow > 0:
                # Keep the preview short; report how much was cut.
                text = f"{text[:_MAX_VALUE_LEN]}... ({overflow} more chars)"
            # markup=False: argument values may contain Rich-like brackets.
            yield Static(f"{key}: {text}", markup=False, classes="approval-description")
|
||||
|
||||
|
||||
class WriteFileApprovalWidget(ToolApprovalWidget):
    """Approval widget for write_file - shows file content with syntax highlighting."""

    def compose(self) -> ComposeResult:
        """Render the target path plus a (possibly truncated) highlighted preview."""
        path = self.data.get("file_path", "")
        body = self.data.get("content", "")
        lang = self.data.get("file_extension", "text")

        # File path header (markup=False: paths may contain brackets).
        yield Static(f"File: {path}", markup=False, classes="approval-file-path")
        yield Static("")

        # Syntax highlighting comes from a fenced Markdown code block.
        rows = body.split("\n")
        overflow = len(rows) - _MAX_LINES
        if overflow > 0:
            # Truncate long files for display and report the hidden tail.
            preview = "\n".join(rows[:_MAX_LINES]) + f"\n... ({overflow} more lines)"
        else:
            preview = body
        yield Markdown(f"```{lang}\n{preview}\n```")
|
||||
|
||||
|
||||
class EditFileApprovalWidget(ToolApprovalWidget):
    """Approval widget for edit_file - shows clean diff with colors.

    Expects ``self.data`` to carry ``file_path``, precomputed ``diff_lines``
    (unified diff), and the raw ``old_string``/``new_string`` as a fallback
    when no diff lines are available.
    """

    DEFAULT_CSS = """
    EditFileApprovalWidget .diff-removed-line {
        color: #ff6b6b;
        background: #3d1f1f;
    }
    EditFileApprovalWidget .diff-added-line {
        color: #69db7c;
        background: #1f3d1f;
    }
    EditFileApprovalWidget .diff-context-line {
        color: #888888;
    }
    EditFileApprovalWidget .diff-stats {
        color: #888888;
        margin-top: 1;
    }
    """

    def compose(self) -> ComposeResult:
        """Compose the diff display with colored additions and deletions."""
        file_path = self.data.get("file_path", "")
        diff_lines = self.data.get("diff_lines", [])
        old_string = self.data.get("old_string", "")
        new_string = self.data.get("new_string", "")

        # Calculate stats first for header
        additions, deletions = self._count_stats(diff_lines, old_string, new_string)

        # File path header with stats (e.g. "+3 -1")
        stats_str = self._format_stats(additions, deletions)
        yield Static(f"[bold cyan]File:[/bold cyan] {file_path} {stats_str}")
        yield Static("")

        if not diff_lines and not old_string and not new_string:
            yield Static("No changes to display", classes="approval-description")
            return

        # Render content: prefer the unified diff; fall back to raw strings.
        if diff_lines:
            yield from self._render_diff_lines_only(diff_lines)
        else:
            yield from self._render_strings_only(old_string, new_string)

    def _count_stats(
        self, diff_lines: list[str], old_string: str, new_string: str
    ) -> tuple[int, int]:
        """Count additions and deletions from diff data.

        When diff lines exist, counts "+"/"-" lines while excluding the
        "+++"/"---" file headers; otherwise approximates from the raw
        strings' line counts.
        """
        if diff_lines:
            additions = sum(
                1 for line in diff_lines if line.startswith("+") and not line.startswith("+++")
            )
            deletions = sum(
                1 for line in diff_lines if line.startswith("-") and not line.startswith("---")
            )
        else:
            # split-style line count: N newlines means N+1 lines.
            additions = new_string.count("\n") + 1 if new_string else 0
            deletions = old_string.count("\n") + 1 if old_string else 0
        return additions, deletions

    def _format_stats(self, additions: int, deletions: int) -> str:
        """Format stats as colored string (empty string when both are zero)."""
        parts = []
        if additions:
            parts.append(f"[green]+{additions}[/green]")
        if deletions:
            parts.append(f"[red]-{deletions}[/red]")
        return " ".join(parts)

    def _render_diff_lines_only(self, diff_lines: list[str]) -> ComposeResult:
        """Render unified diff lines without returning stats.

        Caps output at _MAX_DIFF_LINES rendered lines.
        NOTE(review): the "(N more lines)" count is based on len(diff_lines)
        and so also counts skipped "@@"/"---"/"+++" header lines — it can
        slightly overstate the hidden content; confirm whether that is intended.
        """
        lines_shown = 0

        for line in diff_lines:
            if lines_shown >= _MAX_DIFF_LINES:
                yield Static(f"[dim]... ({len(diff_lines) - lines_shown} more lines)[/dim]")
                break

            # Hunk markers and file headers are noise for an approval preview.
            if line.startswith(("@@", "---", "+++")):
                continue

            widget = self._render_diff_line(line)
            if widget:
                yield widget
                lines_shown += 1

    def _render_strings_only(self, old_string: str, new_string: str) -> ComposeResult:
        """Render old/new strings without returning stats."""
        if old_string:
            yield Static("[bold red]Removing:[/bold red]")
            yield from self._render_string_lines(old_string, is_addition=False)
            yield Static("")

        if new_string:
            yield Static("[bold green]Adding:[/bold green]")
            yield from self._render_string_lines(new_string, is_addition=True)

    def _render_diff_line(self, line: str) -> Static | None:
        """Render a single diff line with appropriate styling.

        Returns None for blank, unclassifiable lines so the caller can skip them.
        """
        # Strip the +/-/space prefix before escaping user content.
        content = _escape_markup(line[1:] if len(line) > 1 else "")

        if line.startswith("-"):
            return Static(f"[on #3d1f1f][red]- {content}[/red][/on #3d1f1f]")
        if line.startswith("+"):
            return Static(f"[on #1f3d1f][green]+ {content}[/green][/on #1f3d1f]")
        if line.startswith(" "):
            return Static(f"[dim] {content}[/dim]")
        if line.strip():
            # Unknown non-empty line: show verbatim without markup interpretation.
            return Static(line, markup=False)
        return None

    def _render_string_lines(self, text: str, *, is_addition: bool) -> ComposeResult:
        """Render lines from a string with appropriate styling.

        Caps the preview at _MAX_PREVIEW_LINES lines and reports the remainder.
        """
        lines = text.split("\n")
        style = "[on #1f3d1f][green]+" if is_addition else "[on #3d1f1f][red]-"
        end_style = "[/green][/on #1f3d1f]" if is_addition else "[/red][/on #3d1f1f]"

        for line in lines[:_MAX_PREVIEW_LINES]:
            escaped = _escape_markup(line)
            yield Static(f"{style} {escaped}{end_style}")

        if len(lines) > _MAX_PREVIEW_LINES:
            remaining = len(lines) - _MAX_PREVIEW_LINES
            yield Static(f"[dim]... ({remaining} more lines)[/dim]")
|
||||
|
||||
|
||||
class BashApprovalWidget(ToolApprovalWidget):
    """Approval widget for bash/shell commands."""

    def compose(self) -> ComposeResult:
        """Show the optional description, then the command in a bash code block."""
        description = self.data.get("description", "")

        if description:
            # Plain-text description first (markup=False: may contain brackets).
            yield Static(description, markup=False, classes="approval-description")
            yield Static("")

        # Fenced Markdown block gives the command bash syntax highlighting.
        yield Markdown("```bash\n{}\n```".format(self.data.get("command", "")))
|
||||
@@ -0,0 +1,31 @@
|
||||
"""Welcome banner widget for deepagents-cli."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from textual.widgets import Static
|
||||
|
||||
from deepagents_cli._version import __version__
|
||||
from deepagents_cli.config import DEEP_AGENTS_ASCII
|
||||
|
||||
|
||||
class WelcomeBanner(Static):
    """Welcome banner displayed at startup.

    A static Rich-markup banner: ASCII art, version tag, greeting, and a
    one-line key-binding hint.
    """

    DEFAULT_CSS = """
    WelcomeBanner {
        height: auto;
        padding: 1;
        margin-bottom: 1;
    }
    """

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the welcome banner."""
        # Use the same green color as the original UI (#10b981)
        banner_text = f"[bold #10b981]{DEEP_AGENTS_ASCII}[/bold #10b981]"
        # No newline before the version tag: it follows the ASCII art directly.
        banner_text += f"[dim]v{__version__}[/dim]\n"
        banner_text += "[#10b981]Ready to code! What would you like to build?[/#10b981]\n"
        banner_text += "[dim]Enter send • Ctrl+J newline • @ files • / commands[/dim]"
        super().__init__(banner_text, **kwargs)
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
name: skill-creator
|
||||
description: Guide for creating effective skills that extend agent capabilities with specialized knowledge, workflows, or tool integrations. Use this skill when the user asks to: (1) create a new skill, (2) make a skill, (3) build a skill, (4) set up a skill, (5) initialize a skill, (6) scaffold a skill, (7) update or modify an existing skill, (8) validate a skill, (9) learn about skill structure, (10) understand how skills work, or (11) get guidance on skill design patterns. Trigger on phrases like "create a skill", "new skill", "make a skill", "skill for X", "how do I create a skill", or "help me build a skill".
|
||||
description: "Guide for creating effective skills that extend agent capabilities with specialized knowledge, workflows, or tool integrations. Use this skill when the user asks to: (1) create a new skill, (2) make a skill, (3) build a skill, (4) set up a skill, (5) initialize a skill, (6) scaffold a skill, (7) update or modify an existing skill, (8) validate a skill, (9) learn about skill structure, (10) understand how skills work, or (11) get guidance on skill design patterns. Trigger on phrases like \"create a skill\", \"new skill\", \"make a skill\", \"skill for X\", \"how do I create a skill\", or \"help me build a skill\"."
|
||||
---
|
||||
|
||||
# Skill Creator
|
||||
|
||||
@@ -6,20 +6,24 @@ readme = "README.md"
|
||||
license = { text = "MIT" }
|
||||
requires-python = ">=3.11,<4.0"
|
||||
dependencies = [
|
||||
"deepagents==0.2.8",
|
||||
"deepagents==0.2.8,<1.0.0",
|
||||
"langchain>=1.2.3,<2.0.0",
|
||||
"langchain-openai>=1.1.7,<2.0.0",
|
||||
"langgraph-checkpoint-sqlite>=2.0.0,<3.0.0",
|
||||
"requests",
|
||||
"rich>=13.0.0",
|
||||
"prompt-toolkit>=3.0.52",
|
||||
"langchain-openai>=0.1.0",
|
||||
"tavily-python",
|
||||
"python-dotenv",
|
||||
"daytona>=0.113.0",
|
||||
"modal>=0.65.0",
|
||||
"markdownify>=0.13.0",
|
||||
"langchain>=1.0.7",
|
||||
"runloop-api-client>=0.69.0",
|
||||
"pillow>=10.0.0",
|
||||
"pyyaml>=6.0",
|
||||
"textual>=1.0.0",
|
||||
"textual-autocomplete>=3.0.0",
|
||||
"aiosqlite>=0.19.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -1,274 +0,0 @@
|
||||
"""Integration test for CLI with auto-approve mode.
|
||||
|
||||
This module implements benchmarking for simple tasks using the DeepAgents CLI; e.g.,
|
||||
"write a poem to a file", "create multiple files", etc.
|
||||
|
||||
The agent runs on auto-approve mode, meaning it can perform actions without
|
||||
user confirmation.
|
||||
|
||||
Note on testing approach:
|
||||
- We use StringIO to capture console output, which is the recommended
|
||||
approach according to Rich's documentation for unit/integration tests.
|
||||
- The capture() context manager is an alternative, but StringIO provides
|
||||
better control and is simpler for testing purposes.
|
||||
- We patch console instances in both main and config modules to ensure
|
||||
all output is captured in the test.
|
||||
"""
|
||||
|
||||
import os
|
||||
import uuid
|
||||
from collections.abc import AsyncIterator
|
||||
from contextlib import asynccontextmanager
|
||||
from io import StringIO
|
||||
from pathlib import Path
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
from langgraph.checkpoint.memory import MemorySaver
|
||||
from rich.console import Console
|
||||
|
||||
from deepagents_cli import config as config_module
|
||||
from deepagents_cli import main as main_module
|
||||
from deepagents_cli.agent import create_cli_agent
|
||||
from deepagents_cli.config import SessionState, create_model
|
||||
from deepagents_cli.main import simple_cli
|
||||
|
||||
|
||||
@asynccontextmanager
async def run_cli_task(task: str, tmp_path: Path) -> AsyncIterator[tuple[Path, str]]:
    """Context manager to run a CLI task with auto-approve and capture output.

    Runs the full CLI loop once: the mocked prompt session feeds `task` as the
    single user input, then raises EOFError to exit cleanly.

    Args:
        task: The task string to give to the agent
        tmp_path: Temporary directory for the test

    Yields:
        tuple: (working_directory: Path, console_output: str)
    """
    original_dir = Path.cwd()
    # The agent writes files relative to CWD, so run inside the temp dir.
    os.chdir(tmp_path)

    # Capture console output
    # Using StringIO is the recommended approach for testing (per Rich docs)
    output = StringIO()
    captured_console = Console(
        file=output,
        force_terminal=False,  # Disable ANSI codes for simpler assertions
        width=120,  # Fixed width for predictable output
        color_system=None,  # Explicitly disable colors for testing
        legacy_windows=False,  # Modern behavior
    )

    try:
        # Mock the prompt session to provide input and exit
        # Use patch.object() to fail immediately if attributes don't exist
        with patch.object(main_module, "create_prompt_session") as mock_prompt:
            mock_session = AsyncMock()
            mock_session.prompt_async.side_effect = [
                task,  # User input
                EOFError(),  # Exit after task
            ]
            mock_prompt.return_value = mock_session

            # Mock console to capture output
            # Use patch.object() to fail immediately if attributes don't exist
            with (
                patch.object(main_module, "console", captured_console),
                patch.object(config_module, "console", captured_console),
            ):
                # Import after patching (shadows the module-level imports on
                # purpose so the patched console is in effect when used)
                from deepagents_cli.agent import create_cli_agent
                from deepagents_cli.config import create_model

                # Create real agent with real model (will use env var or fail gracefully)
                model = create_model()
                agent, backend = create_cli_agent(
                    model=model,
                    assistant_id="test_agent",
                    tools=[],
                    sandbox=None,
                    sandbox_type=None,
                )

                # Create session state with auto-approve
                session_state = SessionState(auto_approve=True)

                # Run the CLI
                await simple_cli(
                    agent=agent,
                    assistant_id="test_agent",
                    session_state=session_state,
                    baseline_tokens=0,
                    backend=backend,
                    sandbox_type=None,
                    setup_script_path=None,
                )

            # Verify that our mocks were actually used (ensures patching worked)
            mock_prompt.assert_called_once()
            assert mock_session.prompt_async.call_count >= 1, (
                "prompt_async should have been called at least once"
            )

        # Yield the directory and captured output
        yield tmp_path, output.getvalue()

    finally:
        # Always restore the original working directory, even on failure.
        os.chdir(original_dir)
|
||||
|
||||
|
||||
@asynccontextmanager
async def run_agent_task_with_hitl(task: str, tmp_path: Path) -> AsyncIterator:
    """Context manager to run an agent task with HIL and stream events.

    Unlike run_cli_task, this drives the agent graph directly (no CLI loop),
    with human-in-the-loop approval enabled, so tests can inspect interrupts.

    Args:
        task: The task string to give to the agent
        tmp_path: Temporary directory for the test

    Yields:
        AsyncGenerator: Stream of events from the agent
    """
    original_dir = Path.cwd()
    os.chdir(tmp_path)

    try:
        # Create agent with HIL enabled (no auto-approve)
        model = create_model()
        checkpointer = MemorySaver()
        agent, _backend = create_cli_agent(
            model=model,
            assistant_id="test_agent",
            tools=[],
            sandbox=None,
            sandbox_type=None,
        )
        # Checkpointer is required so the run can pause at interrupts.
        agent.checkpointer = checkpointer

        # Create config with thread_id for checkpointing
        config = {"configurable": {"thread_id": str(uuid.uuid4())}}

        # Yield the stream generator for the test to consume
        yield agent.astream(
            {"messages": [{"role": "user", "content": task}]},
            config=config,
            stream_mode="values",
        )

    finally:
        # Always restore the original working directory.
        os.chdir(original_dir)
|
||||
|
||||
|
||||
class TestSimpleTasks:
    """A collection of simple task benchmarks for the deepagents-cli.

    These are end-to-end tests: they run the real CLI loop with a real model
    (see run_cli_task), so they are slow and need model credentials.
    """

    @pytest.mark.asyncio
    @pytest.mark.timeout(120)  # Agent can take 60-120 seconds
    async def test_write_hello_to_a_file(self, tmp_path: Path) -> None:
        """Test agents to write 'hello' to a file."""
        async with run_cli_task("write hello to file foo.md", tmp_path) as (
            work_dir,
            console_output,
        ):
            # Verify the file was created
            output_file = work_dir / "foo.md"
            assert output_file.exists(), f"foo.md should have been created in {work_dir}"

            content = output_file.read_text()
            assert "hello" in content.lower(), f"File should contain 'hello', but got: {content!r}"

            # Verify console output shows auto-approve mode
            # Print output for debugging if assertion fails
            assert "Auto-approve" in console_output or "⚡" in console_output, (
                f"Expected auto-approve indicator in output.\nConsole output:\n{console_output}"
            )

    @pytest.mark.asyncio
    @pytest.mark.timeout(120)
    async def test_cli_auto_approve_multiple_operations(self, tmp_path: Path) -> None:
        """Test agent to create multiple files with auto-approve."""
        task = "create files test1.txt and test2.txt with content 'test file'"

        async with run_cli_task(task, tmp_path) as (work_dir, console_output):
            # Verify both files were created
            test1 = work_dir / "test1.txt"
            test2 = work_dir / "test2.txt"

            # At least one file should be created (agent might interpret task differently)
            created_files = [f for f in [test1, test2] if f.exists()]
            assert len(created_files) > 0, (
                f"Expected at least one test file to be created in {work_dir}.\n"
                f"Files in directory: {list(work_dir.iterdir())}"
            )

            # Verify console output captured the interaction
            assert len(console_output) > 0, "Console output should not be empty"
|
||||
|
||||
|
||||
class TestAgentBehavior:
    """A collection of tests for agent behavior (non-CLI level)."""

    @pytest.mark.asyncio
    @pytest.mark.timeout(120)
    async def test_run_command_calls_shell_tool(self, tmp_path: Path) -> None:
        """Test that 'run make format' calls shell tool with 'make format' command.

        This test verifies that when a user says "run make format", the agent
        correctly interprets this as a shell command and calls the shell tool
        with just "make format" (not including the word "run").

        The test stops at the interrupt (HITL approval point) before the shell
        tool is actually executed, to verify the correct command is being passed.
        """
        # Mock the settings to use a fresh filesystem in tmp_path
        from deepagents_cli.config import Settings

        mock_settings = Settings.from_environment(start_path=tmp_path)

        # Patch settings in all modules that import it
        # (each module holds its own reference, so one patch is not enough)
        patches = [
            patch("deepagents_cli.config.settings", mock_settings),
            patch("deepagents_cli.agent.settings", mock_settings),
            patch("deepagents_cli.file_ops.settings", mock_settings),
            patch("deepagents_cli.tools.settings", mock_settings),
            patch("deepagents_cli.token_utils.settings", mock_settings),
        ]

        # Apply all patches using ExitStack for cleaner nesting
        from contextlib import ExitStack

        with ExitStack() as stack:
            for p in patches:
                stack.enter_context(p)

            async with run_agent_task_with_hitl("run make format", tmp_path) as stream:
                # Stream events and capture the final result
                events = []
                result = {}
                async for event in stream:
                    events.append(event)
                    result = event

                # Verify that we captured events
                assert len(events) > 0, "Expected to receive events from agent stream"

                # Verify that an interrupt occurred (shell tool requires approval)
                assert "__interrupt__" in result, "Expected shell tool to trigger HITL interrupt"
                assert result["__interrupt__"] is not None

                # Extract interrupt information
                interrupts = result["__interrupt__"]
                assert len(interrupts) > 0, "Expected at least one interrupt"

                interrupt_value = interrupts[0].value
                action_requests = interrupt_value.get("action_requests", [])

                # Verify that a shell tool call is present
                shell_calls = [req for req in action_requests if req.get("name") == "shell"]
                assert len(shell_calls) > 0, "Expected at least one shell tool call"

                # Verify the shell command is "make format" (not "run make format")
                shell_call = shell_calls[0]
                command = shell_call.get("args", {}).get("command", "")
                assert command == "make format", (
                    f"Expected shell command to be 'make format', got: {command}"
                )
|
||||
@@ -1,139 +0,0 @@
|
||||
"""Tests for project-specific memory and dual agent.md loading."""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from deepagents_cli.agent_memory import AgentMemoryMiddleware
|
||||
from deepagents_cli.config import Settings
|
||||
from deepagents_cli.skills import SkillsMiddleware
|
||||
|
||||
|
||||
class TestAgentMemoryMiddleware:
    """Test dual memory loading in AgentMemoryMiddleware.

    "Dual" means: a user-level agent.md under ~/.deepagents/<agent>/ and an
    optional project-level .deepagents/agent.md inside a git project root.
    """

    def test_load_user_memory_only(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
        """Test loading user agent.md when no project memory exists."""
        # Mock Path.home() to return tmp_path
        monkeypatch.setattr("pathlib.Path.home", lambda: tmp_path)

        # Create user agent directory
        agent_dir = tmp_path / ".deepagents" / "test_agent"
        agent_dir.mkdir(parents=True)
        user_md = agent_dir / "agent.md"
        user_md.write_text("User instructions")

        # Create a directory without .git to avoid project detection
        non_project_dir = tmp_path / "not-a-project"
        non_project_dir.mkdir()

        # Change to non-project directory for test
        original_cwd = Path.cwd()
        try:
            os.chdir(non_project_dir)

            # Create settings (no project detected from non_project_dir)
            test_settings = Settings.from_environment(start_path=non_project_dir)

            # Create middleware
            middleware = AgentMemoryMiddleware(settings=test_settings, assistant_id="test_agent")

            # Simulate before_agent call with no project root
            state = {}
            result = middleware.before_agent(state, None)

            assert result["user_memory"] == "User instructions"
            # No project was detected, so no project memory key at all.
            assert "project_memory" not in result
        finally:
            os.chdir(original_cwd)

    def test_load_both_memories(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
        """Test loading both user and project agent.md."""
        # Mock Path.home() to return tmp_path
        monkeypatch.setattr("pathlib.Path.home", lambda: tmp_path)

        # Create user agent directory
        agent_dir = tmp_path / ".deepagents" / "test_agent"
        agent_dir.mkdir(parents=True)
        user_md = agent_dir / "agent.md"
        user_md.write_text("User instructions")

        # Create project with .git and agent.md in .deepagents/
        # (.git marks the directory as a project root)
        project_root = tmp_path / "project"
        project_root.mkdir()
        (project_root / ".git").mkdir()
        (project_root / ".deepagents").mkdir()
        project_md = project_root / ".deepagents" / "agent.md"
        project_md.write_text("Project instructions")

        original_cwd = Path.cwd()
        try:
            os.chdir(project_root)

            # Create settings (project detected from project_root)
            test_settings = Settings.from_environment(start_path=project_root)

            # Create middleware
            middleware = AgentMemoryMiddleware(settings=test_settings, assistant_id="test_agent")

            # Simulate before_agent call
            state = {}
            result = middleware.before_agent(state, None)

            assert result["user_memory"] == "User instructions"
            assert result["project_memory"] == "Project instructions"
        finally:
            os.chdir(original_cwd)

    def test_memory_not_reloaded_if_already_in_state(self, tmp_path: Path) -> None:
        """Test that memory is not reloaded if already in state."""
        agent_dir = tmp_path / ".deepagents" / "test_agent"
        agent_dir.mkdir(parents=True)

        # Create settings
        test_settings = Settings.from_environment(start_path=tmp_path)

        middleware = AgentMemoryMiddleware(settings=test_settings, assistant_id="test_agent")

        # State already has memory
        state = {"user_memory": "Existing memory", "project_memory": "Existing project"}
        result = middleware.before_agent(state, None)

        # Should return empty dict (no updates)
        assert result == {}
|
||||
|
||||
|
||||
class TestSkillsPathResolution:
    """Test skills path resolution with per-agent structure."""

    def test_skills_middleware_paths(self, tmp_path: Path) -> None:
        """Test that skills middleware uses correct per-agent paths."""
        agent_dir = tmp_path / ".deepagents" / "test_agent"
        skills_dir = agent_dir / "skills"
        skills_dir.mkdir(parents=True)

        middleware = SkillsMiddleware(skills_dir=skills_dir, assistant_id="test_agent")

        # Check paths are correctly set
        assert middleware.skills_dir == skills_dir
        # Display path is the user-facing "~" form, independent of tmp_path.
        assert middleware.user_skills_display == "~/.deepagents/test_agent/skills"

    def test_skills_dir_per_agent(self, tmp_path: Path) -> None:
        """Test that different agents have separate skills directories."""
        from deepagents_cli.skills import SkillsMiddleware

        # Agent 1
        agent1_skills = tmp_path / ".deepagents" / "agent1" / "skills"
        agent1_skills.mkdir(parents=True)
        middleware1 = SkillsMiddleware(skills_dir=agent1_skills, assistant_id="agent1")

        # Agent 2
        agent2_skills = tmp_path / ".deepagents" / "agent2" / "skills"
        agent2_skills.mkdir(parents=True)
        middleware2 = SkillsMiddleware(skills_dir=agent2_skills, assistant_id="agent2")

        # Should have different paths
        assert middleware1.skills_dir != middleware2.skills_dir
        assert "agent1" in middleware1.user_skills_display
        assert "agent2" in middleware2.user_skills_display
|
||||
@@ -0,0 +1,327 @@
|
||||
"""Tests for autocomplete fuzzy search functionality."""
|
||||
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from deepagents_cli.widgets.autocomplete import (
|
||||
SLASH_COMMANDS,
|
||||
FuzzyFileController,
|
||||
MultiCompletionManager,
|
||||
SlashCommandController,
|
||||
_find_project_root,
|
||||
_fuzzy_score,
|
||||
_fuzzy_search,
|
||||
_is_dotpath,
|
||||
_path_depth,
|
||||
)
|
||||
|
||||
|
||||
class TestFuzzyScore:
    """Tests for the _fuzzy_score function.

    Scores are relative: the absolute thresholds below (140, 90, 100, 15)
    mirror the scoring constants in the autocomplete module.
    """

    def test_exact_filename_match_at_start(self):
        """Exact match at start of filename gets highest score."""
        score = _fuzzy_score("main", "src/main.py")
        assert score > 140  # Should be ~150

    def test_exact_filename_match_anywhere(self):
        """Exact match anywhere in filename."""
        score = _fuzzy_score("test", "src/my_test_file.py")
        assert score > 90  # Should be ~100

    def test_word_boundary_match(self):
        """Match at word boundary (after _, -, .) gets bonus."""
        score_boundary = _fuzzy_score("test", "src/my_test.py")
        score_middle = _fuzzy_score("est", "src/mytest.py")
        assert score_boundary > score_middle

    def test_path_match_lower_than_filename(self):
        """Match in path scores lower than filename match."""
        filename_score = _fuzzy_score("utils", "utils.py")
        path_score = _fuzzy_score("utils", "src/utils/helper.py")
        assert filename_score > path_score

    def test_no_match_returns_low_score(self):
        """Completely unrelated strings get very low scores."""
        score = _fuzzy_score("xyz", "abc.py")
        assert score < 15  # Below MIN_FUZZY_SCORE threshold

    def test_case_insensitive(self):
        """Matching is case insensitive."""
        score_lower = _fuzzy_score("main", "Main.py")
        score_upper = _fuzzy_score("MAIN", "main.py")
        assert score_lower > 100
        assert score_upper > 100

    def test_shorter_paths_preferred(self):
        """Shorter paths get slightly higher scores for same match."""
        short_score = _fuzzy_score("test", "test.py")
        long_score = _fuzzy_score("test", "very/long/path/to/test.py")
        assert short_score > long_score
|
||||
|
||||
|
||||
class TestFuzzySearch:
    """Tests for the _fuzzy_search function."""

    @pytest.fixture
    def sample_files(self):
        """Sample file list for testing.

        Mix of root-level files, nested files, and dotfiles so each
        ranking/filtering rule has something to act on.
        """
        return [
            "README.md",
            "setup.py",
            "src/main.py",
            "src/utils.py",
            "src/helpers/string_utils.py",
            "tests/test_main.py",
            "tests/test_utils.py",
            ".github/workflows/ci.yml",
            ".gitignore",
            "docs/api.md",
        ]

    def test_empty_query_returns_root_files_first(self, sample_files):
        """Empty query returns files sorted by depth, then name."""
        results = _fuzzy_search("", sample_files, limit=5)
        # Root level files should come first
        assert results[0] in ["README.md", "setup.py"]
        assert all("/" not in r for r in results[:2])  # First items are root level

    def test_exact_match_ranked_first(self, sample_files):
        """Exact filename matches are ranked first."""
        results = _fuzzy_search("main", sample_files, limit=5)
        assert "src/main.py" in results[:2]

    def test_filters_dotfiles_by_default(self, sample_files):
        """Dotfiles are filtered out by default."""
        results = _fuzzy_search("git", sample_files, limit=10)
        assert not any(".git" in r for r in results)

    def test_includes_dotfiles_when_query_starts_with_dot(self, sample_files):
        """Dotfiles included when query starts with '.'."""
        results = _fuzzy_search(".git", sample_files, limit=10, include_dotfiles=True)
        assert any(".git" in r for r in results)

    def test_respects_limit(self, sample_files):
        """Results respect the limit parameter."""
        results = _fuzzy_search("", sample_files, limit=3)
        assert len(results) <= 3

    def test_filters_low_score_matches(self, sample_files):
        """Low score matches are filtered out."""
        results = _fuzzy_search("xyznonexistent", sample_files, limit=10)
        assert len(results) == 0

    def test_utils_matches_multiple_files(self, sample_files):
        """Query matching multiple files returns all matches."""
        results = _fuzzy_search("utils", sample_files, limit=10)
        assert len(results) >= 2
        assert any("utils.py" in r for r in results)
|
||||
|
||||
|
||||
class TestHelperFunctions:
    """Tests for the small path-inspection helpers."""

    def test_is_dotpath_detects_dotfiles(self):
        """_is_dotpath flags any path containing a dot-prefixed component."""
        for path in (".gitignore", ".github/workflows/ci.yml", "src/.hidden/file.py"):
            assert _is_dotpath(path) is True

    def test_is_dotpath_allows_normal_files(self):
        """_is_dotpath leaves ordinary, visible paths alone."""
        for path in ("src/main.py", "README.md", "tests/test_main.py"):
            assert _is_dotpath(path) is False

    def test_path_depth_counts_slashes(self):
        """_path_depth equals the number of directory separators."""
        expectations = {
            "file.py": 0,
            "src/file.py": 1,
            "src/utils/file.py": 2,
            "a/b/c/d/file.py": 4,
        }
        for path, depth in expectations.items():
            assert _path_depth(path) == depth
|
||||
|
||||
|
||||
class TestFindProjectRoot:
    """Tests for the _find_project_root helper."""

    def test_finds_git_root(self, tmp_path):
        """Walking up from a nested directory stops at the .git parent."""
        (tmp_path / ".git").mkdir()
        start = tmp_path / "src" / "deep" / "nested"
        start.mkdir(parents=True)

        assert _find_project_root(start) == tmp_path

    def test_returns_start_path_when_no_git(self, tmp_path):
        """Without a .git marker the search falls back to the start path."""
        start = tmp_path / "some" / "path"
        start.mkdir(parents=True)

        root = _find_project_root(start)
        # Either the start itself or one of its ancestors is acceptable
        # since no .git exists anywhere under tmp_path.
        assert root == start or start.is_relative_to(root)

    def test_handles_root_level_git(self, tmp_path):
        """A .git directly inside the start path is detected immediately."""
        (tmp_path / ".git").mkdir()

        assert _find_project_root(tmp_path) == tmp_path
|
||||
|
||||
|
||||
class TestSlashCommandController:
    """Tests for SlashCommandController."""

    @pytest.fixture
    def mock_view(self):
        """A stand-in CompletionView that records calls."""
        return MagicMock()

    @pytest.fixture
    def controller(self, mock_view):
        """The controller under test, wired to the mocked view."""
        return SlashCommandController(SLASH_COMMANDS, mock_view)

    def test_can_handle_slash_prefix(self, controller):
        """Any input beginning with '/' is claimed by the controller."""
        for text, cursor in (("/", 1), ("/hel", 4), ("/help", 5)):
            assert controller.can_handle(text, cursor) is True

    def test_cannot_handle_non_slash(self, controller):
        """Input not starting with '/' is declined."""
        for text, cursor in (("hello", 5), ("", 0), ("test /cmd", 9)):
            assert controller.can_handle(text, cursor) is False

    def test_filters_commands_by_prefix(self, controller, mock_view):
        """Typing a prefix narrows the suggestion list to matching commands."""
        controller.on_text_changed("/hel", 4)

        mock_view.render_completion_suggestions.assert_called()
        shown = mock_view.render_completion_suggestions.call_args[0][0]
        assert any("/help" in item[0] for item in shown)

    def test_shows_all_commands_on_slash_only(self, controller, mock_view):
        """A bare '/' lists the complete command set."""
        controller.on_text_changed("/", 1)

        mock_view.render_completion_suggestions.assert_called()
        shown = mock_view.render_completion_suggestions.call_args[0][0]
        assert len(shown) == len(SLASH_COMMANDS)

    def test_clears_on_no_match(self, controller, mock_view):
        """Suggestions are cleared once the input stops matching."""
        # Populate suggestions first so there is something to clear.
        controller.on_text_changed("/h", 2)
        mock_view.render_completion_suggestions.assert_called()

        # No command matches this prefix, so the view must be cleared.
        controller.on_text_changed("/xyz", 4)
        mock_view.clear_completion_suggestions.assert_called()

    def test_reset_clears_state(self, controller, mock_view):
        """reset() wipes both the view and the internal state."""
        controller.on_text_changed("/h", 2)
        controller.reset()

        mock_view.clear_completion_suggestions.assert_called()
|
||||
|
||||
|
||||
class TestFuzzyFileControllerCanHandle:
    """Tests for FuzzyFileController.can_handle."""

    @pytest.fixture
    def mock_view(self):
        """A stand-in CompletionView."""
        return MagicMock()

    @pytest.fixture
    def controller(self, mock_view, tmp_path):
        """A controller rooted at a throwaway working directory."""
        return FuzzyFileController(mock_view, cwd=tmp_path)

    def test_handles_at_symbol(self, controller):
        """An '@' token before the cursor activates the controller."""
        for text, cursor in (("@", 1), ("@file", 5), ("look at @src/main.py", 20)):
            assert controller.can_handle(text, cursor) is True

    def test_handles_at_mid_text(self, controller):
        """The '@' does not have to be the first character."""
        assert controller.can_handle("check @file", 11) is True
        assert controller.can_handle("see @", 5) is True

    def test_no_handle_without_at(self, controller):
        """Plain text containing no '@' is declined."""
        assert controller.can_handle("hello", 5) is False
        assert controller.can_handle("", 0) is False

    def test_no_handle_at_after_cursor(self, controller):
        """An '@' located past the cursor position is irrelevant."""
        assert controller.can_handle("hello @file", 5) is False

    def test_no_handle_space_after_at(self, controller):
        """A space between '@' and the cursor breaks the file token."""
        assert controller.can_handle("@ file", 6) is False
        assert controller.can_handle("@file name", 10) is False

    def test_invalid_cursor_positions(self, controller):
        """Out-of-range cursor values never match (and never raise)."""
        for cursor in (0, -1, 100):
            assert controller.can_handle("@file", cursor) is False
|
||||
|
||||
|
||||
class TestMultiCompletionManager:
    """Tests for MultiCompletionManager input routing."""

    @pytest.fixture
    def mock_view(self):
        """A stand-in CompletionView."""
        return MagicMock()

    @pytest.fixture
    def manager(self, mock_view, tmp_path):
        """A manager composed of a slash controller and a file controller."""
        controllers = [
            SlashCommandController(SLASH_COMMANDS, mock_view),
            FuzzyFileController(mock_view, cwd=tmp_path),
        ]
        return MultiCompletionManager(controllers)

    def test_activates_slash_controller_for_slash(self, manager):
        """A '/' prefix routes input to the slash controller."""
        manager.on_text_changed("/help", 5)
        assert isinstance(manager._active, SlashCommandController)

    def test_activates_file_controller_for_at(self, manager):
        """An '@' prefix routes input to the file controller."""
        manager.on_text_changed("@file", 5)
        assert isinstance(manager._active, FuzzyFileController)

    def test_no_active_for_plain_text(self, manager):
        """Ordinary text leaves no controller active."""
        manager.on_text_changed("hello world", 11)
        assert manager._active is None

    def test_switches_controllers(self, manager):
        """The active controller follows the input's trigger character."""
        manager.on_text_changed("/cmd", 4)
        assert isinstance(manager._active, SlashCommandController)

        manager.on_text_changed("@file", 5)
        assert isinstance(manager._active, FuzzyFileController)

    def test_reset_clears_active(self, manager):
        """reset() deactivates whichever controller was active."""
        manager.on_text_changed("/cmd", 4)
        manager.reset()
        assert manager._active is None
|
||||
@@ -50,17 +50,17 @@ class TestProjectRootDetection:
|
||||
|
||||
|
||||
class TestProjectAgentMdFinding:
|
||||
"""Test finding project-specific agent.md files."""
|
||||
"""Test finding project-specific AGENTS.md files."""
|
||||
|
||||
def test_find_agent_md_in_deepagents_dir(self, tmp_path: Path) -> None:
|
||||
"""Test finding agent.md in .deepagents/ directory."""
|
||||
"""Test finding AGENTS.md in .deepagents/ directory."""
|
||||
project_root = tmp_path / "project"
|
||||
project_root.mkdir()
|
||||
|
||||
# Create .deepagents/agent.md
|
||||
# Create .deepagents/AGENTS.md
|
||||
deepagents_dir = project_root / ".deepagents"
|
||||
deepagents_dir.mkdir()
|
||||
agent_md = deepagents_dir / "agent.md"
|
||||
agent_md = deepagents_dir / "AGENTS.md"
|
||||
agent_md.write_text("Project instructions")
|
||||
|
||||
result = _find_project_agent_md(project_root)
|
||||
@@ -68,12 +68,12 @@ class TestProjectAgentMdFinding:
|
||||
assert result[0] == agent_md
|
||||
|
||||
def test_find_agent_md_in_root(self, tmp_path: Path) -> None:
|
||||
"""Test finding agent.md in project root (fallback)."""
|
||||
"""Test finding AGENTS.md in project root (fallback)."""
|
||||
project_root = tmp_path / "project"
|
||||
project_root.mkdir()
|
||||
|
||||
# Create root-level agent.md (no .deepagents/)
|
||||
agent_md = project_root / "agent.md"
|
||||
# Create root-level AGENTS.md (no .deepagents/)
|
||||
agent_md = project_root / "AGENTS.md"
|
||||
agent_md.write_text("Project instructions")
|
||||
|
||||
result = _find_project_agent_md(project_root)
|
||||
@@ -81,17 +81,17 @@ class TestProjectAgentMdFinding:
|
||||
assert result[0] == agent_md
|
||||
|
||||
def test_both_agent_md_files_combined(self, tmp_path: Path) -> None:
|
||||
"""Test that both agent.md files are returned when both exist."""
|
||||
"""Test that both AGENTS.md files are returned when both exist."""
|
||||
project_root = tmp_path / "project"
|
||||
project_root.mkdir()
|
||||
|
||||
# Create both locations
|
||||
deepagents_dir = project_root / ".deepagents"
|
||||
deepagents_dir.mkdir()
|
||||
deepagents_md = deepagents_dir / "agent.md"
|
||||
deepagents_md = deepagents_dir / "AGENTS.md"
|
||||
deepagents_md.write_text("In .deepagents/")
|
||||
|
||||
root_md = project_root / "agent.md"
|
||||
root_md = project_root / "AGENTS.md"
|
||||
root_md.write_text("In root")
|
||||
|
||||
# Should return both, with .deepagents/ first
|
||||
@@ -101,7 +101,7 @@ class TestProjectAgentMdFinding:
|
||||
assert result[1] == root_md
|
||||
|
||||
def test_find_agent_md_not_found(self, tmp_path: Path) -> None:
|
||||
"""Test that empty list is returned when no agent.md exists."""
|
||||
"""Test that empty list is returned when no AGENTS.md exists."""
|
||||
project_root = tmp_path / "project"
|
||||
project_root.mkdir()
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ def test_imports() -> None:
|
||||
"""Test importing deepagents modules."""
|
||||
from deepagents_cli import (
|
||||
agent, # noqa: F401
|
||||
agent_memory, # noqa: F401
|
||||
integrations, # noqa: F401
|
||||
)
|
||||
from deepagents_cli.main import cli_main # noqa: F401
|
||||
|
||||
@@ -0,0 +1,263 @@
|
||||
"""Tests for session/thread management."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import sqlite3
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from deepagents_cli import sessions
|
||||
|
||||
|
||||
class TestGenerateThreadId:
    """Tests for sessions.generate_thread_id."""

    def test_length(self):
        """Generated IDs are always eight characters long."""
        assert len(sessions.generate_thread_id()) == 8

    def test_hex(self):
        """Generated IDs parse as hexadecimal without raising."""
        int(sessions.generate_thread_id(), 16)

    def test_unique(self):
        """Repeated generation does not collide in practice."""
        seen = {sessions.generate_thread_id() for _ in range(100)}
        assert len(seen) == 100
|
||||
|
||||
|
||||
class TestThreadFunctions:
    """Tests for the thread query functions in the sessions module.

    Each test points sessions.get_db_path at a temporary SQLite database so
    no real session store is touched. The schema mirrors the ``checkpoints``
    (and optionally ``writes``) tables that the LangGraph SQLite checkpointer
    creates — NOTE(review): keep in sync with that schema if it changes.
    """

    @staticmethod
    def _create_db(db_path, *, with_writes=False):
        """Create the checkpointer schema at *db_path*.

        Returns an open connection so callers can insert rows; the caller is
        responsible for committing and closing it.
        """
        conn = sqlite3.connect(str(db_path))
        conn.execute("""
            CREATE TABLE IF NOT EXISTS checkpoints (
                thread_id TEXT NOT NULL,
                checkpoint_ns TEXT NOT NULL DEFAULT '',
                checkpoint_id TEXT NOT NULL,
                parent_checkpoint_id TEXT,
                type TEXT,
                checkpoint BLOB,
                metadata BLOB,
                PRIMARY KEY (thread_id, checkpoint_ns, checkpoint_id)
            )
        """)
        if with_writes:
            conn.execute("""
                CREATE TABLE IF NOT EXISTS writes (
                    thread_id TEXT NOT NULL,
                    checkpoint_ns TEXT NOT NULL DEFAULT '',
                    checkpoint_id TEXT NOT NULL,
                    task_id TEXT NOT NULL,
                    idx INTEGER NOT NULL,
                    channel TEXT NOT NULL,
                    type TEXT,
                    value BLOB,
                    PRIMARY KEY (thread_id, checkpoint_ns, checkpoint_id, task_id, idx)
                )
            """)
        return conn

    @classmethod
    def _empty_db(cls, tmp_path):
        """Create and close an empty database; return its path."""
        db_path = tmp_path / "empty.db"
        conn = cls._create_db(db_path)
        conn.commit()
        conn.close()
        return db_path

    @pytest.fixture
    def temp_db(self, tmp_path):
        """Database pre-populated with three threads across two agents."""
        db_path = tmp_path / "test_sessions.db"
        conn = self._create_db(db_path, with_writes=True)

        from datetime import UTC, datetime

        now = datetime.now(UTC).isoformat()
        earlier = "2024-01-01T10:00:00+00:00"

        # (thread_id, agent_name, updated_at) — thread1 is the most recent.
        threads = [
            ("thread1", "agent1", now),
            ("thread2", "agent2", earlier),
            ("thread3", "agent1", earlier),
        ]
        for tid, agent, updated in threads:
            # Thread metadata is stored as JSON in the checkpoint row.
            metadata = json.dumps({"agent_name": agent, "updated_at": updated})
            conn.execute(
                "INSERT INTO checkpoints (thread_id, checkpoint_ns, checkpoint_id, metadata) VALUES (?, '', ?, ?)",
                (tid, f"cp_{tid}", metadata),
            )

        conn.commit()
        conn.close()
        return db_path

    def test_list_threads_empty(self, tmp_path):
        """list_threads returns [] when the database holds no threads."""
        db_path = self._empty_db(tmp_path)
        with patch.object(sessions, "get_db_path", return_value=db_path):
            threads = asyncio.run(sessions.list_threads())
            assert threads == []

    def test_list_threads(self, temp_db):
        """list_threads returns every stored thread."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            threads = asyncio.run(sessions.list_threads())
            assert len(threads) == 3

    def test_list_threads_filter_by_agent(self, temp_db):
        """list_threads honours the agent_name filter."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            threads = asyncio.run(sessions.list_threads(agent_name="agent1"))
            assert len(threads) == 2
            assert all(t["agent_name"] == "agent1" for t in threads)

    def test_list_threads_limit(self, temp_db):
        """list_threads honours the limit parameter."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            threads = asyncio.run(sessions.list_threads(limit=2))
            assert len(threads) == 2

    def test_get_most_recent(self, temp_db):
        """get_most_recent returns some thread when any exist."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            tid = asyncio.run(sessions.get_most_recent())
            assert tid is not None

    def test_get_most_recent_filter(self, temp_db):
        """get_most_recent honours the agent_name filter."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            tid = asyncio.run(sessions.get_most_recent(agent_name="agent2"))
            assert tid == "thread2"

    def test_get_most_recent_empty(self, tmp_path):
        """get_most_recent returns None on an empty database."""
        db_path = self._empty_db(tmp_path)
        with patch.object(sessions, "get_db_path", return_value=db_path):
            tid = asyncio.run(sessions.get_most_recent())
            assert tid is None

    def test_thread_exists(self, temp_db):
        """thread_exists is True for a stored thread."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            assert asyncio.run(sessions.thread_exists("thread1")) is True

    def test_thread_not_exists(self, temp_db):
        """thread_exists is False for an unknown thread."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            assert asyncio.run(sessions.thread_exists("nonexistent")) is False

    def test_get_thread_agent(self, temp_db):
        """get_thread_agent returns the agent that owns the thread."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            agent = asyncio.run(sessions.get_thread_agent("thread1"))
            assert agent == "agent1"

    def test_get_thread_agent_not_found(self, temp_db):
        """get_thread_agent returns None for an unknown thread."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            agent = asyncio.run(sessions.get_thread_agent("nonexistent"))
            assert agent is None

    def test_delete_thread(self, temp_db):
        """delete_thread removes the thread and reports success."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            result = asyncio.run(sessions.delete_thread("thread1"))
            assert result is True
            assert asyncio.run(sessions.thread_exists("thread1")) is False

    def test_delete_thread_not_found(self, temp_db):
        """delete_thread reports failure for an unknown thread."""
        with patch.object(sessions, "get_db_path", return_value=temp_db):
            result = asyncio.run(sessions.delete_thread("nonexistent"))
            assert result is False
|
||||
|
||||
|
||||
class TestGetCheckpointer:
    """Tests for the get_checkpointer async context manager."""

    def test_returns_async_sqlite_saver(self, tmp_path):
        """The context manager yields an AsyncSqliteSaver instance."""

        async def _exercise():
            db_path = tmp_path / "test.db"
            with patch.object(sessions, "get_db_path", return_value=db_path):
                async with sessions.get_checkpointer() as checkpointer:
                    assert "AsyncSqliteSaver" in type(checkpointer).__name__

        asyncio.run(_exercise())
|
||||
|
||||
|
||||
class TestFormatTimestamp:
    """Tests for the _format_timestamp helper."""

    def test_valid_timestamp(self):
        """A valid ISO timestamp renders to non-empty human-readable text."""
        rendered = sessions._format_timestamp("2024-12-30T21:18:00+00:00")
        assert rendered
        # December should appear in some abbreviated or full form.
        assert "dec" in rendered.lower()

    def test_none(self):
        """None maps to the empty string."""
        assert sessions._format_timestamp(None) == ""

    def test_invalid(self):
        """Unparseable input maps to the empty string rather than raising."""
        assert sessions._format_timestamp("not a timestamp") == ""
|
||||
|
||||
|
||||
class TestTextualSessionState:
    """Tests for TextualSessionState from app.py."""

    def test_stores_provided_thread_id(self):
        """An explicitly supplied thread_id is kept as-is."""
        from deepagents_cli.app import TextualSessionState

        provided = sessions.generate_thread_id()
        state = TextualSessionState(thread_id=provided)
        assert state.thread_id == provided

    def test_generates_id_if_none(self):
        """Omitting the thread_id triggers generation of an 8-char ID."""
        from deepagents_cli.app import TextualSessionState

        state = TextualSessionState(thread_id=None)
        assert state.thread_id is not None
        assert len(state.thread_id) == 8

    def test_reset_thread(self):
        """reset_thread swaps in a brand-new 8-char ID and stores it."""
        from deepagents_cli.app import TextualSessionState

        state = TextualSessionState(thread_id="original")
        previous = state.thread_id
        fresh = state.reset_thread()
        assert fresh != previous
        assert len(fresh) == 8
        assert state.thread_id == fresh
|
||||
369
deepagents_sourcecode/libs/deepagents-cli/uv.lock
generated
369
deepagents_sourcecode/libs/deepagents-cli/uv.lock
generated
@@ -152,6 +152,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aiosqlite"
|
||||
version = "0.22.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/4e/8a/64761f4005f17809769d23e518d915db74e6310474e733e3593cfc854ef1/aiosqlite-0.22.1.tar.gz", hash = "sha256:043e0bd78d32888c0a9ca90fc788b38796843360c855a7262a532813133a0650", size = 14821, upload-time = "2025-12-23T19:25:43.997Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/00/b7/e3bf5133d697a08128598c8d0abc5e16377b51465a33756de24fa7dee953/aiosqlite-0.22.1-py3-none-any.whl", hash = "sha256:21c002eb13823fad740196c5a2e9d8e62f6243bd9e7e4a1f87fb5e44ecb4fceb", size = 17405, upload-time = "2025-12-23T19:25:42.139Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "annotated-types"
|
||||
version = "0.7.0"
|
||||
@@ -163,7 +172,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "anthropic"
|
||||
version = "0.74.1"
|
||||
version = "0.75.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
@@ -175,9 +184,9 @@ dependencies = [
|
||||
{ name = "sniffio" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d7/7b/609eea5c54ae69b1a4a94169d4b0c86dc5c41b43509989913f6cdc61b81d/anthropic-0.74.1.tar.gz", hash = "sha256:04c087b2751385c524f6d332d066a913870e4de8b3e335fb0a0c595f1f88dc6e", size = 428981, upload-time = "2025-11-19T22:17:31.533Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/04/1f/08e95f4b7e2d35205ae5dcbb4ae97e7d477fc521c275c02609e2931ece2d/anthropic-0.75.0.tar.gz", hash = "sha256:e8607422f4ab616db2ea5baacc215dd5f028da99ce2f022e33c7c535b29f3dfb", size = 439565, upload-time = "2025-11-24T20:41:45.28Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/45/6b18d0692302b8cbc01a10c35b43953d3c4172fbd4f83337b8ed21a8eaa4/anthropic-0.74.1-py3-none-any.whl", hash = "sha256:b07b998d1cee7f41d9f02530597d7411672b362cc2417760a40c0167b81c6e65", size = 371473, upload-time = "2025-11-19T22:17:29.998Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/1c/1cd02b7ae64302a6e06724bf80a96401d5313708651d277b1458504a1730/anthropic-0.75.0-py3-none-any.whl", hash = "sha256:ea8317271b6c15d80225a9f3c670152746e88805a7a61e14d4a374577164965b", size = 388164, upload-time = "2025-11-24T20:41:43.587Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -248,15 +257,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cb/8c/2b30c12155ad8de0cf641d76a8b396a16d2c36bc6d50b621a62b7c4567c1/build-1.3.0-py3-none-any.whl", hash = "sha256:7145f0b5061ba90a1500d60bd1b13ca0a8a4cebdd0cc16ed8adf1c0e739f43b4", size = 23382, upload-time = "2025-08-01T21:27:07.844Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cachetools"
|
||||
version = "6.2.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/fb/44/ca1675be2a83aeee1886ab745b28cda92093066590233cc501890eb8417a/cachetools-6.2.2.tar.gz", hash = "sha256:8e6d266b25e539df852251cfd6f990b4bc3a141db73b939058d809ebd2590fc6", size = 31571, upload-time = "2025-11-13T17:42:51.465Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e6/46/eb6eca305c77a4489affe1c5d8f4cae82f285d9addd8de4ec084a7184221/cachetools-6.2.2-py3-none-any.whl", hash = "sha256:6c09c98183bf58560c97b2abfcedcbaf6a896a490f534b031b661d3723b45ace", size = 11503, upload-time = "2025-11-13T17:42:50.232Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cbor2"
|
||||
version = "5.7.1"
|
||||
@@ -676,7 +676,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "deepagents"
|
||||
version = "0.3.1"
|
||||
version = "0.3.5"
|
||||
source = { directory = "../deepagents" }
|
||||
dependencies = [
|
||||
{ name = "langchain" },
|
||||
@@ -688,10 +688,10 @@ dependencies = [
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "langchain", specifier = ">=1.1.0,<2.0.0" },
|
||||
{ name = "langchain-anthropic", specifier = ">=1.2.0,<2.0.0" },
|
||||
{ name = "langchain-core", specifier = ">=1.1.0,<2.0.0" },
|
||||
{ name = "langchain-google-genai" },
|
||||
{ name = "langchain", specifier = ">=1.2.3,<2.0.0" },
|
||||
{ name = "langchain-anthropic", specifier = ">=1.3.1,<2.0.0" },
|
||||
{ name = "langchain-core", specifier = ">=1.2.6,<2.0.0" },
|
||||
{ name = "langchain-google-genai", specifier = ">=4.1.3,<5.0.0" },
|
||||
{ name = "wcmatch" },
|
||||
]
|
||||
|
||||
@@ -715,10 +715,12 @@ name = "deepagents-cli"
|
||||
version = "0.0.12"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "aiosqlite" },
|
||||
{ name = "daytona" },
|
||||
{ name = "deepagents" },
|
||||
{ name = "langchain" },
|
||||
{ name = "langchain-openai" },
|
||||
{ name = "langgraph-checkpoint-sqlite" },
|
||||
{ name = "markdownify" },
|
||||
{ name = "modal" },
|
||||
{ name = "pillow" },
|
||||
@@ -729,6 +731,8 @@ dependencies = [
|
||||
{ name = "rich" },
|
||||
{ name = "runloop-api-client" },
|
||||
{ name = "tavily-python" },
|
||||
{ name = "textual" },
|
||||
{ name = "textual-autocomplete" },
|
||||
]
|
||||
|
||||
[package.dev-dependencies]
|
||||
@@ -759,10 +763,12 @@ test = [
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "aiosqlite", specifier = ">=0.19.0" },
|
||||
{ name = "daytona", specifier = ">=0.113.0" },
|
||||
{ name = "deepagents", directory = "../deepagents" },
|
||||
{ name = "langchain", specifier = ">=1.0.7" },
|
||||
{ name = "langchain-openai", specifier = ">=0.1.0" },
|
||||
{ name = "langchain", specifier = ">=1.2.3,<2.0.0" },
|
||||
{ name = "langchain-openai", specifier = ">=1.1.7,<2.0.0" },
|
||||
{ name = "langgraph-checkpoint-sqlite", specifier = ">=2.0.0,<3.0.0" },
|
||||
{ name = "markdownify", specifier = ">=0.13.0" },
|
||||
{ name = "modal", specifier = ">=0.65.0" },
|
||||
{ name = "pillow", specifier = ">=10.0.0" },
|
||||
@@ -773,6 +779,8 @@ requires-dist = [
|
||||
{ name = "rich", specifier = ">=13.0.0" },
|
||||
{ name = "runloop-api-client", specifier = ">=0.69.0" },
|
||||
{ name = "tavily-python" },
|
||||
{ name = "textual", specifier = ">=1.0.0" },
|
||||
{ name = "textual-autocomplete", specifier = ">=3.0.0" },
|
||||
]
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
@@ -967,133 +975,43 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "google-ai-generativelanguage"
|
||||
version = "0.9.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "google-api-core", extra = ["grpc"] },
|
||||
{ name = "google-auth" },
|
||||
{ name = "grpcio" },
|
||||
{ name = "proto-plus" },
|
||||
{ name = "protobuf" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ad/7e/67fdc46187541ead599e77f259d915f129c2f49568ebf5cadb322130712b/google_ai_generativelanguage-0.9.0.tar.gz", hash = "sha256:2524748f413917446febc8e0879dc0d4f026a064f89f17c42b81bea77ab76c84", size = 1481662, upload-time = "2025-10-20T14:56:23.123Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/91/c2d39ad5d77813afadb0f0b8789d882d15c191710b6b6f7cb158376342ff/google_ai_generativelanguage-0.9.0-py3-none-any.whl", hash = "sha256:59f61e54cb341e602073098389876594c4d12e458617727558bb2628a86f3eb2", size = 1401288, upload-time = "2025-10-20T14:52:58.403Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "google-api-core"
|
||||
version = "2.28.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "google-auth" },
|
||||
{ name = "googleapis-common-protos" },
|
||||
{ name = "proto-plus" },
|
||||
{ name = "protobuf" },
|
||||
{ name = "requests" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/61/da/83d7043169ac2c8c7469f0e375610d78ae2160134bf1b80634c482fa079c/google_api_core-2.28.1.tar.gz", hash = "sha256:2b405df02d68e68ce0fbc138559e6036559e685159d148ae5861013dc201baf8", size = 176759, upload-time = "2025-10-28T21:34:51.529Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ed/d4/90197b416cb61cefd316964fd9e7bd8324bcbafabf40eef14a9f20b81974/google_api_core-2.28.1-py3-none-any.whl", hash = "sha256:4021b0f8ceb77a6fb4de6fde4502cecab45062e66ff4f2895169e0b35bc9466c", size = 173706, upload-time = "2025-10-28T21:34:50.151Z" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
grpc = [
|
||||
{ name = "grpcio" },
|
||||
{ name = "grpcio-status" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "google-auth"
|
||||
version = "2.43.0"
|
||||
version = "2.47.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "cachetools" },
|
||||
{ name = "pyasn1-modules" },
|
||||
{ name = "rsa" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ff/ef/66d14cf0e01b08d2d51ffc3c20410c4e134a1548fc246a6081eae585a4fe/google_auth-2.43.0.tar.gz", hash = "sha256:88228eee5fc21b62a1b5fe773ca15e67778cb07dc8363adcb4a8827b52d81483", size = 296359, upload-time = "2025-11-06T00:13:36.587Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/60/3c/ec64b9a275ca22fa1cd3b6e77fefcf837b0732c890aa32d2bd21313d9b33/google_auth-2.47.0.tar.gz", hash = "sha256:833229070a9dfee1a353ae9877dcd2dec069a8281a4e72e72f77d4a70ff945da", size = 323719, upload-time = "2026-01-06T21:55:31.045Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/6f/d1/385110a9ae86d91cc14c5282c61fe9f4dc41c0b9f7d423c6ad77038c4448/google_auth-2.43.0-py2.py3-none-any.whl", hash = "sha256:af628ba6fa493f75c7e9dbe9373d148ca9f4399b5ea29976519e0a3848eddd16", size = 223114, upload-time = "2025-11-06T00:13:35.209Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/db/18/79e9008530b79527e0d5f79e7eef08d3b179b7f851cfd3a2f27822fbdfa9/google_auth-2.47.0-py3-none-any.whl", hash = "sha256:c516d68336bfde7cf0da26aab674a36fedcf04b37ac4edd59c597178760c3498", size = 234867, upload-time = "2026-01-06T21:55:28.6Z" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
requests = [
|
||||
{ name = "requests" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "googleapis-common-protos"
|
||||
version = "1.72.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "protobuf" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grpcio"
|
||||
version = "1.76.0"
|
||||
name = "google-genai"
|
||||
version = "1.57.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
{ name = "distro" },
|
||||
{ name = "google-auth", extra = ["requests"] },
|
||||
{ name = "httpx" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "requests" },
|
||||
{ name = "sniffio" },
|
||||
{ name = "tenacity" },
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "websockets" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b6/e0/318c1ce3ae5a17894d5791e87aea147587c9e702f24122cc7a5c8bbaeeb1/grpcio-1.76.0.tar.gz", hash = "sha256:7be78388d6da1a25c0d5ec506523db58b18be22d9c37d8d3a32c08be4987bd73", size = 12785182, upload-time = "2025-10-21T16:23:12.106Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2b/b4/8251c2d2576224a4b51a8ab6159820f9200b8da28ff555c78ee15607096e/google_genai-1.57.0.tar.gz", hash = "sha256:0ff9c36b8d68abfbdbd13b703ece926de5f3e67955666b36315ecf669b94a826", size = 485648, upload-time = "2026-01-07T20:38:20.271Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/a0/00/8163a1beeb6971f66b4bbe6ac9457b97948beba8dd2fc8e1281dce7f79ec/grpcio-1.76.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:2e1743fbd7f5fa713a1b0a8ac8ebabf0ec980b5d8809ec358d488e273b9cf02a", size = 5843567, upload-time = "2025-10-21T16:20:52.829Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/10/c1/934202f5cf335e6d852530ce14ddb0fef21be612ba9ecbbcbd4d748ca32d/grpcio-1.76.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:a8c2cf1209497cf659a667d7dea88985e834c24b7c3b605e6254cbb5076d985c", size = 11848017, upload-time = "2025-10-21T16:20:56.705Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/11/0b/8dec16b1863d74af6eb3543928600ec2195af49ca58b16334972f6775663/grpcio-1.76.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:08caea849a9d3c71a542827d6df9d5a69067b0a1efbea8a855633ff5d9571465", size = 6412027, upload-time = "2025-10-21T16:20:59.3Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d7/64/7b9e6e7ab910bea9d46f2c090380bab274a0b91fb0a2fe9b0cd399fffa12/grpcio-1.76.0-cp311-cp311-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f0e34c2079d47ae9f6188211db9e777c619a21d4faba6977774e8fa43b085e48", size = 7075913, upload-time = "2025-10-21T16:21:01.645Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/68/86/093c46e9546073cefa789bd76d44c5cb2abc824ca62af0c18be590ff13ba/grpcio-1.76.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8843114c0cfce61b40ad48df65abcfc00d4dba82eae8718fab5352390848c5da", size = 6615417, upload-time = "2025-10-21T16:21:03.844Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f7/b6/5709a3a68500a9c03da6fb71740dcdd5ef245e39266461a03f31a57036d8/grpcio-1.76.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8eddfb4d203a237da6f3cc8a540dad0517d274b5a1e9e636fd8d2c79b5c1d397", size = 7199683, upload-time = "2025-10-21T16:21:06.195Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/91/d3/4b1f2bf16ed52ce0b508161df3a2d186e4935379a159a834cb4a7d687429/grpcio-1.76.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:32483fe2aab2c3794101c2a159070584e5db11d0aa091b2c0ea9c4fc43d0d749", size = 8163109, upload-time = "2025-10-21T16:21:08.498Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5c/61/d9043f95f5f4cf085ac5dd6137b469d41befb04bd80280952ffa2a4c3f12/grpcio-1.76.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:dcfe41187da8992c5f40aa8c5ec086fa3672834d2be57a32384c08d5a05b4c00", size = 7626676, upload-time = "2025-10-21T16:21:10.693Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/36/95/fd9a5152ca02d8881e4dd419cdd790e11805979f499a2e5b96488b85cf27/grpcio-1.76.0-cp311-cp311-win32.whl", hash = "sha256:2107b0c024d1b35f4083f11245c0e23846ae64d02f40b2b226684840260ed054", size = 3997688, upload-time = "2025-10-21T16:21:12.746Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/9c/5c359c8d4c9176cfa3c61ecd4efe5affe1f38d9bae81e81ac7186b4c9cc8/grpcio-1.76.0-cp311-cp311-win_amd64.whl", hash = "sha256:522175aba7af9113c48ec10cc471b9b9bd4f6ceb36aeb4544a8e2c80ed9d252d", size = 4709315, upload-time = "2025-10-21T16:21:15.26Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bf/05/8e29121994b8d959ffa0afd28996d452f291b48cfc0875619de0bde2c50c/grpcio-1.76.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:81fd9652b37b36f16138611c7e884eb82e0cec137c40d3ef7c3f9b3ed00f6ed8", size = 5799718, upload-time = "2025-10-21T16:21:17.939Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/75/11d0e66b3cdf998c996489581bdad8900db79ebd83513e45c19548f1cba4/grpcio-1.76.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:04bbe1bfe3a68bbfd4e52402ab7d4eb59d72d02647ae2042204326cf4bbad280", size = 11825627, upload-time = "2025-10-21T16:21:20.466Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/28/50/2f0aa0498bc188048f5d9504dcc5c2c24f2eb1a9337cd0fa09a61a2e75f0/grpcio-1.76.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d388087771c837cdb6515539f43b9d4bf0b0f23593a24054ac16f7a960be16f4", size = 6359167, upload-time = "2025-10-21T16:21:23.122Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/66/e5/bbf0bb97d29ede1d59d6588af40018cfc345b17ce979b7b45424628dc8bb/grpcio-1.76.0-cp312-cp312-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:9f8f757bebaaea112c00dba718fc0d3260052ce714e25804a03f93f5d1c6cc11", size = 7044267, upload-time = "2025-10-21T16:21:25.995Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/86/f6ec2164f743d9609691115ae8ece098c76b894ebe4f7c94a655c6b03e98/grpcio-1.76.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:980a846182ce88c4f2f7e2c22c56aefd515daeb36149d1c897f83cf57999e0b6", size = 6573963, upload-time = "2025-10-21T16:21:28.631Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/bc/8d9d0d8505feccfdf38a766d262c71e73639c165b311c9457208b56d92ae/grpcio-1.76.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f92f88e6c033db65a5ae3d97905c8fea9c725b63e28d5a75cb73b49bda5024d8", size = 7164484, upload-time = "2025-10-21T16:21:30.837Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/e6/5d6c2fc10b95edf6df9b8f19cf10a34263b7fd48493936fffd5085521292/grpcio-1.76.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4baf3cbe2f0be3289eb68ac8ae771156971848bb8aaff60bad42005539431980", size = 8127777, upload-time = "2025-10-21T16:21:33.577Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3f/c8/dce8ff21c86abe025efe304d9e31fdb0deaaa3b502b6a78141080f206da0/grpcio-1.76.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:615ba64c208aaceb5ec83bfdce7728b80bfeb8be97562944836a7a0a9647d882", size = 7594014, upload-time = "2025-10-21T16:21:41.882Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e0/42/ad28191ebf983a5d0ecef90bab66baa5a6b18f2bfdef9d0a63b1973d9f75/grpcio-1.76.0-cp312-cp312-win32.whl", hash = "sha256:45d59a649a82df5718fd9527ce775fd66d1af35e6d31abdcdc906a49c6822958", size = 3984750, upload-time = "2025-10-21T16:21:44.006Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9e/00/7bd478cbb851c04a48baccaa49b75abaa8e4122f7d86da797500cccdd771/grpcio-1.76.0-cp312-cp312-win_amd64.whl", hash = "sha256:c088e7a90b6017307f423efbb9d1ba97a22aa2170876223f9709e9d1de0b5347", size = 4704003, upload-time = "2025-10-21T16:21:46.244Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/ed/71467ab770effc9e8cef5f2e7388beb2be26ed642d567697bb103a790c72/grpcio-1.76.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:26ef06c73eb53267c2b319f43e6634c7556ea37672029241a056629af27c10e2", size = 5807716, upload-time = "2025-10-21T16:21:48.475Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/85/c6ed56f9817fab03fa8a111ca91469941fb514e3e3ce6d793cb8f1e1347b/grpcio-1.76.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:45e0111e73f43f735d70786557dc38141185072d7ff8dc1829d6a77ac1471468", size = 11821522, upload-time = "2025-10-21T16:21:51.142Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ac/31/2b8a235ab40c39cbc141ef647f8a6eb7b0028f023015a4842933bc0d6831/grpcio-1.76.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:83d57312a58dcfe2a3a0f9d1389b299438909a02db60e2f2ea2ae2d8034909d3", size = 6362558, upload-time = "2025-10-21T16:21:54.213Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/64/9784eab483358e08847498ee56faf8ff6ea8e0a4592568d9f68edc97e9e9/grpcio-1.76.0-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:3e2a27c89eb9ac3d81ec8835e12414d73536c6e620355d65102503064a4ed6eb", size = 7049990, upload-time = "2025-10-21T16:21:56.476Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/94/8c12319a6369434e7a184b987e8e9f3b49a114c489b8315f029e24de4837/grpcio-1.76.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61f69297cba3950a524f61c7c8ee12e55c486cb5f7db47ff9dcee33da6f0d3ae", size = 6575387, upload-time = "2025-10-21T16:21:59.051Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/15/0f/f12c32b03f731f4a6242f771f63039df182c8b8e2cf8075b245b409259d4/grpcio-1.76.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6a15c17af8839b6801d554263c546c69c4d7718ad4321e3166175b37eaacca77", size = 7166668, upload-time = "2025-10-21T16:22:02.049Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/2d/3ec9ce0c2b1d92dd59d1c3264aaec9f0f7c817d6e8ac683b97198a36ed5a/grpcio-1.76.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:25a18e9810fbc7e7f03ec2516addc116a957f8cbb8cbc95ccc80faa072743d03", size = 8124928, upload-time = "2025-10-21T16:22:04.984Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/74/fd3317be5672f4856bcdd1a9e7b5e17554692d3db9a3b273879dc02d657d/grpcio-1.76.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:931091142fd8cc14edccc0845a79248bc155425eee9a98b2db2ea4f00a235a42", size = 7589983, upload-time = "2025-10-21T16:22:07.881Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/bb/ca038cf420f405971f19821c8c15bcbc875505f6ffadafe9ffd77871dc4c/grpcio-1.76.0-cp313-cp313-win32.whl", hash = "sha256:5e8571632780e08526f118f74170ad8d50fb0a48c23a746bef2a6ebade3abd6f", size = 3984727, upload-time = "2025-10-21T16:22:10.032Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/41/80/84087dc56437ced7cdd4b13d7875e7439a52a261e3ab4e06488ba6173b0a/grpcio-1.76.0-cp313-cp313-win_amd64.whl", hash = "sha256:f9f7bd5faab55f47231ad8dba7787866b69f5e93bc306e3915606779bbfb4ba8", size = 4702799, upload-time = "2025-10-21T16:22:12.709Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/46/39adac80de49d678e6e073b70204091e76631e03e94928b9ea4ecf0f6e0e/grpcio-1.76.0-cp314-cp314-linux_armv7l.whl", hash = "sha256:ff8a59ea85a1f2191a0ffcc61298c571bc566332f82e5f5be1b83c9d8e668a62", size = 5808417, upload-time = "2025-10-21T16:22:15.02Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9c/f5/a4531f7fb8b4e2a60b94e39d5d924469b7a6988176b3422487be61fe2998/grpcio-1.76.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:06c3d6b076e7b593905d04fdba6a0525711b3466f43b3400266f04ff735de0cd", size = 11828219, upload-time = "2025-10-21T16:22:17.954Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4b/1c/de55d868ed7a8bd6acc6b1d6ddc4aa36d07a9f31d33c912c804adb1b971b/grpcio-1.76.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fd5ef5932f6475c436c4a55e4336ebbe47bd3272be04964a03d316bbf4afbcbc", size = 6367826, upload-time = "2025-10-21T16:22:20.721Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/64/99e44c02b5adb0ad13ab3adc89cb33cb54bfa90c74770f2607eea629b86f/grpcio-1.76.0-cp314-cp314-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b331680e46239e090f5b3cead313cc772f6caa7d0fc8de349337563125361a4a", size = 7049550, upload-time = "2025-10-21T16:22:23.637Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/43/28/40a5be3f9a86949b83e7d6a2ad6011d993cbe9b6bd27bea881f61c7788b6/grpcio-1.76.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2229ae655ec4e8999599469559e97630185fdd53ae1e8997d147b7c9b2b72cba", size = 6575564, upload-time = "2025-10-21T16:22:26.016Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4b/a9/1be18e6055b64467440208a8559afac243c66a8b904213af6f392dc2212f/grpcio-1.76.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:490fa6d203992c47c7b9e4a9d39003a0c2bcc1c9aa3c058730884bbbb0ee9f09", size = 7176236, upload-time = "2025-10-21T16:22:28.362Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/55/dba05d3fcc151ce6e81327541d2cc8394f442f6b350fead67401661bf041/grpcio-1.76.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:479496325ce554792dba6548fae3df31a72cef7bad71ca2e12b0e58f9b336bfc", size = 8125795, upload-time = "2025-10-21T16:22:31.075Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4a/45/122df922d05655f63930cf42c9e3f72ba20aadb26c100ee105cad4ce4257/grpcio-1.76.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1c9b93f79f48b03ada57ea24725d83a30284a012ec27eab2cf7e50a550cbbbcc", size = 7592214, upload-time = "2025-10-21T16:22:33.831Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4a/6e/0b899b7f6b66e5af39e377055fb4a6675c9ee28431df5708139df2e93233/grpcio-1.76.0-cp314-cp314-win32.whl", hash = "sha256:747fa73efa9b8b1488a95d0ba1039c8e2dca0f741612d80415b1e1c560febf4e", size = 4062961, upload-time = "2025-10-21T16:22:36.468Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/41/0b430b01a2eb38ee887f88c1f07644a1df8e289353b78e82b37ef988fb64/grpcio-1.76.0-cp314-cp314-win_amd64.whl", hash = "sha256:922fa70ba549fce362d2e2871ab542082d66e2aaf0c19480ea453905b01f384e", size = 4834462, upload-time = "2025-10-21T16:22:39.772Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grpcio-status"
|
||||
version = "1.76.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "googleapis-common-protos" },
|
||||
{ name = "grpcio" },
|
||||
{ name = "protobuf" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/3f/46/e9f19d5be65e8423f886813a2a9d0056ba94757b0c5007aa59aed1a961fa/grpcio_status-1.76.0.tar.gz", hash = "sha256:25fcbfec74c15d1a1cb5da3fab8ee9672852dc16a5a9eeb5baf7d7a9952943cd", size = 13679, upload-time = "2025-10-21T16:28:52.545Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/8c/cc/27ba60ad5a5f2067963e6a858743500df408eb5855e98be778eaef8c9b02/grpcio_status-1.76.0-py3-none-any.whl", hash = "sha256:380568794055a8efbbd8871162df92012e0228a5f6dffaf57f2a00c534103b18", size = 14425, upload-time = "2025-10-21T16:28:40.853Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/02/858bdae08e2184b6afe0b18bc3113318522c9cf326a5a1698055edd31f88/google_genai-1.57.0-py3-none-any.whl", hash = "sha256:d63c7a89a1f549c4d14032f41a0cdb4b6fe3f565e2eee6b5e0907a0aeceabefd", size = 713323, upload-time = "2026-01-07T20:38:18.051Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1390,35 +1308,35 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain"
|
||||
version = "1.1.0"
|
||||
version = "1.2.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "langgraph" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a1/06/be7273c6c15f5a7e64788ed2aa6329dd019170a176977acff7bcde2cdea2/langchain-1.1.0.tar.gz", hash = "sha256:583c892f59873c0329dbe04169fb3234ac794c50780e7c6fb62a61c7b86a981b", size = 528416, upload-time = "2025-11-24T15:31:24.47Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/5f/78/9565319259d92818d96f30d55507ee1072fbf5c008b95a6acecf5e47c4d6/langchain-1.2.3.tar.gz", hash = "sha256:9d6171f9c3c760ca3c7c2cf8518e6f8625380962c488b41e35ebff1f1d611077", size = 548296, upload-time = "2026-01-08T20:26:30.149Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/6f/889c01d22c84934615fa3f2dcf94c2fe76fd0afa7a7d01f9b798059f0ecc/langchain-1.1.0-py3-none-any.whl", hash = "sha256:af080f3a4a779bfa5925de7aacb6dfab83249d4aab9a08f7aa7b9bec3766d8ea", size = 101797, upload-time = "2025-11-24T15:31:23.401Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/de/e5/9b4f58533f8ce3013b1a993289eb11e8607d9c9d9d14699b29c6ac3b4132/langchain-1.2.3-py3-none-any.whl", hash = "sha256:5cdc7c80f672962b030c4b0d16d0d8f26d849c0ada63a4b8653a20d7505512ae", size = 106428, upload-time = "2026-01-08T20:26:29.162Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-anthropic"
|
||||
version = "1.2.0"
|
||||
version = "1.3.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anthropic" },
|
||||
{ name = "langchain-core" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/66/f2/717dcadf0c96960154594409b68bdd5953ab95439e0b65de13cdd5c08785/langchain_anthropic-1.2.0.tar.gz", hash = "sha256:3f3cfad8c519ead2deb21c30dc538b18f4c094704c7874784320cbed7a199453", size = 688803, upload-time = "2025-11-24T14:17:17.424Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0d/b6/ac5ee84e15bf79844c9c791f99a614c7ec7e1a63c2947e55977be01a81b4/langchain_anthropic-1.3.1.tar.gz", hash = "sha256:4f3d7a4a7729ab1aeaf62d32c87d4d227c1b5421668ca9e3734562b383470b07", size = 708940, upload-time = "2026-01-05T21:07:19.345Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ae/f4/f684725bd375208130ff3e9878ff3e671d888eec89a834617f3d7bcc14c9/langchain_anthropic-1.2.0-py3-none-any.whl", hash = "sha256:f489df97833e12ca0360a098eb9d04e410752840416be87ab60b0a3e120a99fe", size = 49512, upload-time = "2025-11-24T14:17:16.048Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/4f/7a5b32764addf4b757545b89899b9d76688176f19e4ee89868e3b8bbfd0f/langchain_anthropic-1.3.1-py3-none-any.whl", hash = "sha256:1fc28cf8037c30597ee6172fc2ff9e345efe8149a8c2a39897b1eebba2948322", size = 46328, upload-time = "2026-01-05T21:07:18.261Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-core"
|
||||
version = "1.1.0"
|
||||
version = "1.2.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "jsonpatch" },
|
||||
@@ -1428,39 +1346,40 @@ dependencies = [
|
||||
{ name = "pyyaml" },
|
||||
{ name = "tenacity" },
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "uuid-utils" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1e/17/67c1cc2ace919e2b02dd9d783154d7fb3f1495a4ef835d9cd163b7855ac2/langchain_core-1.1.0.tar.gz", hash = "sha256:2b76a82d427922c8bc51c08404af4fc2a29e9f161dfe2297cb05091e810201e7", size = 781995, upload-time = "2025-11-21T21:01:26.958Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b9/ce/ba5ed5ea6df22965b2893c2ed28ebb456204962723d408904c4acfa5e942/langchain_core-1.2.6.tar.gz", hash = "sha256:b4e7841dd7f8690375aa07c54739178dc2c635147d475e0c2955bf82a1afa498", size = 833343, upload-time = "2026-01-02T21:35:44.749Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/71/1e/e129fc471a2d2a7b3804480a937b5ab9319cab9f4142624fcb115f925501/langchain_core-1.1.0-py3-none-any.whl", hash = "sha256:2c9f27dadc6d21ed4aa46506a37a56e6a7e2d2f9141922dc5c251ba921822ee6", size = 473752, upload-time = "2025-11-21T21:01:25.841Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6f/40/0655892c245d8fbe6bca6d673ab5927e5c3ab7be143de40b52289a0663bc/langchain_core-1.2.6-py3-none-any.whl", hash = "sha256:aa6ed954b4b1f4504937fe75fdf674317027e9a91ba7a97558b0de3dc8004e34", size = 489096, upload-time = "2026-01-02T21:35:43.391Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-google-genai"
|
||||
version = "3.2.0"
|
||||
version = "4.1.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "filetype" },
|
||||
{ name = "google-ai-generativelanguage" },
|
||||
{ name = "google-genai" },
|
||||
{ name = "langchain-core" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ff/27/f3c8f47b7c194c42a7ea38e5b91b412c4bd45f97e702a96edad659312437/langchain_google_genai-3.2.0.tar.gz", hash = "sha256:1fa620ea9c655a37537e95438857c423e1a3599b5a665b8dd87064c76ee95b72", size = 242146, upload-time = "2025-11-24T14:33:11.205Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ba/85/078d5aba488a82a53b8372ac1037dee4f64b020bac69e6a07e37a5059059/langchain_google_genai-4.1.3.tar.gz", hash = "sha256:28966c8fe58c9a401fdc37aeeeb0eb51744210803838ce050f022fc53d2f994e", size = 277024, upload-time = "2026-01-05T23:29:34.362Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/83/9d/c79a367e3379cf6b7d0cc43d558a411a5097d55291f2ce2f573420adb523/langchain_google_genai-3.2.0-py3-none-any.whl", hash = "sha256:689fc159d4623a184678e24771f6d52373e983a8fc8d342e44352aaf28e9445d", size = 57604, upload-time = "2025-11-24T14:33:10.112Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/aa/ca61dc2d202a23d7605a5c0ea24bd86a39a5c23c932a166b87c7797747c5/langchain_google_genai-4.1.3-py3-none-any.whl", hash = "sha256:5d710e2dcf449d49704bdbcd31729be90b386fa008395f9552a5c090241de1a5", size = 66262, upload-time = "2026-01-05T23:29:32.924Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-openai"
|
||||
version = "1.0.2"
|
||||
version = "1.1.7"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "openai" },
|
||||
{ name = "tiktoken" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b3/3c/edb7ffca76fdcfd938ce8380bf8ec79a0a8be41ba7fdbf6f9fe1cb5fd1a8/langchain_openai-1.0.2.tar.gz", hash = "sha256:621e8295c52db9a1fc74806a0bd227ea215c132c6c5e421d2982c9ee78468769", size = 1025578, upload-time = "2025-11-03T14:08:32.121Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/38/b7/30bfc4d1b658a9ee524bcce3b0b2ec9c45a11c853a13c4f0c9da9882784b/langchain_openai-1.1.7.tar.gz", hash = "sha256:f5ec31961ed24777548b63a5fe313548bc6e0eb9730d6552b8c6418765254c81", size = 1039134, upload-time = "2026-01-07T19:44:59.728Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/78/9b/7af1d539a051d195c5ecc5990ebd483f208c40f75a8a9532846d16762704/langchain_openai-1.0.2-py3-none-any.whl", hash = "sha256:b3eb9b82752063b46452aa868d8c8bc1604e57631648c3bc325bba58d3aeb143", size = 81934, upload-time = "2025-11-03T14:08:30.655Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/64/a1/50e7596aca775d8c3883eceeaf47489fac26c57c1abe243c00174f715a8a/langchain_openai-1.1.7-py3-none-any.whl", hash = "sha256:34e9cd686aac1a120d6472804422792bf8080a2103b5d21ee450c9e42d053815", size = 84753, upload-time = "2026-01-07T19:44:58.629Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1482,15 +1401,29 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langgraph-checkpoint"
|
||||
version = "3.0.1"
|
||||
version = "2.1.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "ormsgpack" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0f/07/2b1c042fa87d40cf2db5ca27dc4e8dd86f9a0436a10aa4361a8982718ae7/langgraph_checkpoint-3.0.1.tar.gz", hash = "sha256:59222f875f85186a22c494aedc65c4e985a3df27e696e5016ba0b98a5ed2cee0", size = 137785, upload-time = "2025-11-04T21:55:47.774Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/29/83/6404f6ed23a91d7bc63d7df902d144548434237d017820ceaa8d014035f2/langgraph_checkpoint-2.1.2.tar.gz", hash = "sha256:112e9d067a6eff8937caf198421b1ffba8d9207193f14ac6f89930c1260c06f9", size = 142420, upload-time = "2025-10-07T17:45:17.129Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/48/e3/616e3a7ff737d98c1bbb5700dd62278914e2a9ded09a79a1fa93cf24ce12/langgraph_checkpoint-3.0.1-py3-none-any.whl", hash = "sha256:9b04a8d0edc0474ce4eaf30c5d731cee38f11ddff50a6177eead95b5c4e4220b", size = 46249, upload-time = "2025-11-04T21:55:46.472Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/f2/06bf5addf8ee664291e1b9ffa1f28fc9d97e59806dc7de5aea9844cbf335/langgraph_checkpoint-2.1.2-py3-none-any.whl", hash = "sha256:911ebffb069fd01775d4b5184c04aaafc2962fcdf50cf49d524cd4367c4d0c60", size = 45763, upload-time = "2025-10-07T17:45:16.19Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langgraph-checkpoint-sqlite"
|
||||
version = "2.0.11"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "aiosqlite" },
|
||||
{ name = "langgraph-checkpoint" },
|
||||
{ name = "sqlite-vec" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d2/aa/5f9e9de74a6d0a9b77c703db0068d0f0cdc8dbc2e9b292ae95f4de115a44/langgraph_checkpoint_sqlite-2.0.11.tar.gz", hash = "sha256:e9337204c27b01a29edff65c1ecb7da0ca8ac7f1bd66b405617459043ac6c3ed", size = 109749, upload-time = "2025-07-25T17:32:07.773Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/3d/d4/c56f6b0e8c8211791c9954bef0edaef3dc2e118cf33800be44c7b90432bd/langgraph_checkpoint_sqlite-2.0.11-py3-none-any.whl", hash = "sha256:11c40d93225ce99fa2800332c97b16280addf9f15274def32c4d547955290d3f", size = 31191, upload-time = "2025-07-25T17:32:06.355Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1537,6 +1470,18 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/80/1a/0c84f7096d41d64425d29db549c8d6fe075f925a5f2022e8087d01d862c2/langsmith-0.4.47-py3-none-any.whl", hash = "sha256:b9e514611d4e1570e33595d33ccb1fe6eda9f96c5f961095a138651f746c1ef5", size = 411207, upload-time = "2025-11-24T16:01:59.123Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "linkify-it-py"
|
||||
version = "2.0.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "uc-micro-py" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2a/ae/bb56c6828e4797ba5a4821eec7c43b8bf40f69cda4d4f5f8c8a2810ec96a/linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048", size = 27946, upload-time = "2024-02-04T14:48:04.179Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/04/1e/b832de447dee8b582cac175871d2f6c3d5077cc56d5575cadba1fd1cccfa/linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79", size = 19820, upload-time = "2024-02-04T14:48:02.496Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markdown-it-py"
|
||||
version = "4.0.0"
|
||||
@@ -1549,6 +1494,11 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
linkify = [
|
||||
{ name = "linkify-it-py" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markdownify"
|
||||
version = "1.2.0"
|
||||
@@ -1571,6 +1521,18 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/df/081ea8c41696d598e7cea4f101e49da718a9b6c9dcaaad4e76dfc11a022c/marshmallow-4.1.0-py3-none-any.whl", hash = "sha256:9901660499be3b880dc92d6b5ee0b9a79e94265b7793f71021f92040c07129f1", size = 48286, upload-time = "2025-11-01T15:40:35.542Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mdit-py-plugins"
|
||||
version = "0.5.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "markdown-it-py" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = "2025-08-11T07:25:49.083Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mdurl"
|
||||
version = "0.1.2"
|
||||
@@ -2121,6 +2083,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/95/7e/f896623c3c635a90537ac093c6a618ebe1a90d87206e42309cb5d98a1b9e/pillow-12.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5", size = 6997850, upload-time = "2025-10-15T18:24:11.495Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "platformdirs"
|
||||
version = "4.5.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pluggy"
|
||||
version = "1.6.0"
|
||||
@@ -2241,18 +2212,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proto-plus"
|
||||
version = "1.26.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "protobuf" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f4/ac/87285f15f7cce6d4a008f33f1757fb5a13611ea8914eb58c3d0d26243468/proto_plus-1.26.1.tar.gz", hash = "sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012", size = 56142, upload-time = "2025-03-10T15:54:38.843Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/4e/6d/280c4c2ce28b1593a19ad5239c8b826871fc6ec275c21afc8e1820108039/proto_plus-1.26.1-py3-none-any.whl", hash = "sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66", size = 50163, upload-time = "2025-03-10T15:54:37.335Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "protobuf"
|
||||
version = "6.33.0"
|
||||
@@ -2866,6 +2825,18 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sqlite-vec"
|
||||
version = "0.1.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/88/ed/aabc328f29ee6814033d008ec43e44f2c595447d9cccd5f2aabe60df2933/sqlite_vec-0.1.6-py3-none-macosx_10_6_x86_64.whl", hash = "sha256:77491bcaa6d496f2acb5cc0d0ff0b8964434f141523c121e313f9a7d8088dee3", size = 164075, upload-time = "2024-11-20T16:40:29.847Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/57/05604e509a129b22e303758bfa062c19afb020557d5e19b008c64016704e/sqlite_vec-0.1.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fdca35f7ee3243668a055255d4dee4dea7eed5a06da8cad409f89facf4595361", size = 165242, upload-time = "2024-11-20T16:40:31.206Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/48/dbb2cc4e5bad88c89c7bb296e2d0a8df58aab9edc75853728c361eefc24f/sqlite_vec-0.1.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b0519d9cd96164cd2e08e8eed225197f9cd2f0be82cb04567692a0a4be02da3", size = 103704, upload-time = "2024-11-20T16:40:33.729Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/80/76/97f33b1a2446f6ae55e59b33869bed4eafaf59b7f4c662c8d9491b6a714a/sqlite_vec-0.1.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux1_x86_64.whl", hash = "sha256:823b0493add80d7fe82ab0fe25df7c0703f4752941aee1c7b2b02cec9656cb24", size = 151556, upload-time = "2024-11-20T16:40:35.387Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6a/98/e8bc58b178266eae2fcf4c9c7a8303a8d41164d781b32d71097924a6bebe/sqlite_vec-0.1.6-py3-none-win_amd64.whl", hash = "sha256:c65bcfd90fa2f41f9000052bcb8bb75d38240b2dae49225389eca6c3136d3f0c", size = 281540, upload-time = "2024-11-20T16:40:37.296Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "synchronicity"
|
||||
version = "0.10.3"
|
||||
@@ -2901,6 +2872,36 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textual"
|
||||
version = "6.12.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "markdown-it-py", extra = ["linkify"] },
|
||||
{ name = "mdit-py-plugins" },
|
||||
{ name = "platformdirs" },
|
||||
{ name = "pygments" },
|
||||
{ name = "rich" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/39/55/29416ef63de4c37b37da217b94439a28496a4dc585209f5bf1437a61d120/textual-6.12.0.tar.gz", hash = "sha256:a32e8edbf6abdb0c42d486e96bdf419eb3aa378edb1b1271b84637f3dbd64c73", size = 1584182, upload-time = "2026-01-02T09:42:30.415Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/13/f8/2a6a6ff1d07788f635493867d5a4003dfecacad16af1fdc9814d10daca3d/textual-6.12.0-py3-none-any.whl", hash = "sha256:cf9ea9a54d213c7736efe9fef440c7f49218d4e6ab75279afd060eded9c567ec", size = 714912, upload-time = "2026-01-02T09:42:28.786Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textual-autocomplete"
|
||||
version = "4.0.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "textual" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1e/3a/80411bc7b94969eb116ad1b18db90f8dce8a1de441278c4a81fee55a27ca/textual_autocomplete-4.0.6.tar.gz", hash = "sha256:2ba2f0d767be4480ecacb3e4b130cf07340e033c3500fc424fed9125d27a4586", size = 97967, upload-time = "2025-09-24T21:19:20.213Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9f/66/ebe744d79c87f25a42d2654dddbd09462edd595f2ded715245a51a546461/textual_autocomplete-4.0.6-py3-none-any.whl", hash = "sha256:bff69c19386e2cbb4a007503b058dc37671d480a4fa2ddb3959c15ceb4aff9b5", size = 16499, upload-time = "2025-09-24T21:19:18.489Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tiktoken"
|
||||
version = "0.12.0"
|
||||
@@ -3099,6 +3100,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uc-micro-py"
|
||||
version = "1.0.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/91/7a/146a99696aee0609e3712f2b44c6274566bc368dfe8375191278045186b8/uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a", size = 6043, upload-time = "2024-02-09T16:52:01.654Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229, upload-time = "2024-02-09T16:52:00.371Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "urllib3"
|
||||
version = "2.5.0"
|
||||
@@ -3110,22 +3120,31 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "uuid-utils"
|
||||
version = "0.11.1"
|
||||
version = "0.12.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e2/ef/b6c1fd4fee3b2854bf9d602530ab8b6624882e2691c15a9c4d22ea8c03eb/uuid_utils-0.11.1.tar.gz", hash = "sha256:7ef455547c2ccb712840b106b5ab006383a9bfe4125ba1c5ab92e47bcbf79b46", size = 19933, upload-time = "2025-10-02T13:32:09.526Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0b/0e/512fb221e4970c2f75ca9dae412d320b7d9ddc9f2b15e04ea8e44710396c/uuid_utils-0.12.0.tar.gz", hash = "sha256:252bd3d311b5d6b7f5dfce7a5857e27bb4458f222586bb439463231e5a9cbd64", size = 20889, upload-time = "2025-12-01T17:29:55.494Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/40/f5/254d7ce4b3aa4a1a3a4f279e0cc74eec8b4d3a61641d8ffc6e983907f2ca/uuid_utils-0.11.1-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:4bc8cf73c375b9ea11baf70caacc2c4bf7ce9bfd804623aa0541e5656f3dbeaf", size = 581019, upload-time = "2025-10-02T13:31:32.239Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/68/e6/f7d14c4e1988d8beb3ac9bd773f370376c704925bdfb07380f5476bb2986/uuid_utils-0.11.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:0d2cb3bcc6f5862d08a0ee868b18233bc63ba9ea0e85ea9f3f8e703983558eba", size = 294377, upload-time = "2025-10-02T13:31:34.01Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8e/40/847a9a0258e7a2a14b015afdaa06ee4754a2680db7b74bac159d594eeb18/uuid_utils-0.11.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:463400604f623969f198aba9133ebfd717636f5e34257340302b1c3ff685dc0f", size = 328070, upload-time = "2025-10-02T13:31:35.619Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/44/0c/c5d342d31860c9b4f481ef31a4056825961f9b462d216555e76dcee580ea/uuid_utils-0.11.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aef66b935342b268c6ffc1796267a1d9e73135740a10fe7e4098e1891cbcc476", size = 333610, upload-time = "2025-10-02T13:31:37.058Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e1/4b/52edc023ffcb9ab9a4042a58974a79c39ba7a565e683f1fd9814b504cf13/uuid_utils-0.11.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd65c41b81b762278997de0d027161f27f9cc4058fa57bbc0a1aaa63a63d6d1a", size = 475669, upload-time = "2025-10-02T13:31:38.38Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/81/ee55ee63264531bb1c97b5b6033ad6ec81b5cd77f89174e9aef3af3d8889/uuid_utils-0.11.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccfac9d5d7522d61accabb8c68448ead6407933415e67e62123ed6ed11f86510", size = 331946, upload-time = "2025-10-02T13:31:39.66Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cf/07/5d4be27af0e9648afa512f0d11bb6d96cb841dd6d29b57baa3fbf55fd62e/uuid_utils-0.11.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:003f48f05c01692d0c1f7e413d194e7299a1a364e0047a4eb904d3478b84eca1", size = 352920, upload-time = "2025-10-02T13:31:40.94Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5b/48/a69dddd9727512b0583b87bfff97d82a8813b28fb534a183c9e37033cfef/uuid_utils-0.11.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a5c936042120bdc30d62f539165beaa4a6ba7e817a89e5409a6f06dc62c677a9", size = 509413, upload-time = "2025-10-02T13:31:42.547Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/66/0d/1b529a3870c2354dd838d5f133a1cba75220242b0061f04a904ca245a131/uuid_utils-0.11.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:2e16dcdbdf4cd34ffb31ead6236960adb50e6c962c9f4554a6ecfdfa044c6259", size = 529454, upload-time = "2025-10-02T13:31:44.338Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/f2/04a3f77c85585aac09d546edaf871a4012052fb8ace6dbddd153b4d50f02/uuid_utils-0.11.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f8b21fed11b23134502153d652c77c3a37fa841a9aa15a4e6186d440a22f1a0e", size = 498084, upload-time = "2025-10-02T13:31:45.601Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/08/538b380b4c4b220f3222c970930fe459cc37f1dfc6c8dc912568d027f17d/uuid_utils-0.11.1-cp39-abi3-win32.whl", hash = "sha256:72abab5ab27c1b914e3f3f40f910532ae242df1b5f0ae43f1df2ef2f610b2a8c", size = 174314, upload-time = "2025-10-02T13:31:47.269Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/66/971ec830094ac1c7d46381678f7138c1805015399805e7dd7769c893c9c8/uuid_utils-0.11.1-cp39-abi3-win_amd64.whl", hash = "sha256:5ed9962f8993ef2fd418205f92830c29344102f86871d99b57cef053abf227d9", size = 179214, upload-time = "2025-10-02T13:31:48.344Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/43/de5cd49a57b6293b911b6a9a62fc03e55db9f964da7d5882d9edbee1e9d2/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:3b9b30707659292f207b98f294b0e081f6d77e1fbc760ba5b41331a39045f514", size = 603197, upload-time = "2025-12-01T17:29:30.104Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/fa/5fd1d8c9234e44f0c223910808cde0de43bb69f7df1349e49b1afa7f2baa/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:add3d820c7ec14ed37317375bea30249699c5d08ff4ae4dbee9fc9bce3bfbf65", size = 305168, upload-time = "2025-12-01T17:29:31.384Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/c6/8633ac9942bf9dc97a897b5154e5dcffa58816ec4dd780b3b12b559ff05c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8fce83ecb3b16af29c7809669056c4b6e7cc912cab8c6d07361645de12dd79", size = 340580, upload-time = "2025-12-01T17:29:32.362Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/88/8a61307b04b4da1c576373003e6d857a04dade52ab035151d62cb84d5cb5/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec921769afcb905035d785582b0791d02304a7850fbd6ce924c1a8976380dfc6", size = 346771, upload-time = "2025-12-01T17:29:33.708Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/fb/aab2dcf94b991e62aa167457c7825b9b01055b884b888af926562864398c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f3b060330f5899a92d5c723547dc6a95adef42433e9748f14c66859a7396664", size = 474781, upload-time = "2025-12-01T17:29:35.237Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/7a/dbd5e49c91d6c86dba57158bbfa0e559e1ddf377bb46dcfd58aea4f0d567/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:908dfef7f0bfcf98d406e5dc570c25d2f2473e49b376de41792b6e96c1d5d291", size = 343685, upload-time = "2025-12-01T17:29:36.677Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/19/8c4b1d9f450159733b8be421a4e1fb03533709b80ed3546800102d085572/uuid_utils-0.12.0-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4c6a24148926bd0ca63e8a2dabf4cc9dc329a62325b3ad6578ecd60fbf926506", size = 366482, upload-time = "2025-12-01T17:29:37.979Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/82/43/c79a6e45687647f80a159c8ba34346f287b065452cc419d07d2212d38420/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:64a91e632669f059ef605f1771d28490b1d310c26198e46f754e8846dddf12f4", size = 523132, upload-time = "2025-12-01T17:29:39.293Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/a2/b2d75a621260a40c438aa88593827dfea596d18316520a99e839f7a5fb9d/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:93c082212470bb4603ca3975916c205a9d7ef1443c0acde8fbd1e0f5b36673c7", size = 614218, upload-time = "2025-12-01T17:29:40.315Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/13/6b/ba071101626edd5a6dabf8525c9a1537ff3d885dbc210540574a03901fef/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:431b1fb7283ba974811b22abd365f2726f8f821ab33f0f715be389640e18d039", size = 546241, upload-time = "2025-12-01T17:29:41.656Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/12/9a942b81c0923268e6d85bf98d8f0a61fcbcd5e432fef94fdf4ce2ef8748/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2ffd7838c40149100299fa37cbd8bab5ee382372e8e65a148002a37d380df7c8", size = 511842, upload-time = "2025-12-01T17:29:43.107Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a9/a7/c326f5163dd48b79368b87d8a05f5da4668dd228a3f5ca9d79d5fee2fc40/uuid_utils-0.12.0-cp39-abi3-win32.whl", hash = "sha256:487f17c0fee6cbc1d8b90fe811874174a9b1b5683bf2251549e302906a50fed3", size = 179088, upload-time = "2025-12-01T17:29:44.492Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/92/41c8734dd97213ee1d5ae435cf4499705dc4f2751e3b957fd12376f61784/uuid_utils-0.12.0-cp39-abi3-win_amd64.whl", hash = "sha256:9598e7c9da40357ae8fffc5d6938b1a7017f09a1acbcc95e14af8c65d48c655a", size = 183003, upload-time = "2025-12-01T17:29:45.47Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/f9/52ab0359618987331a1f739af837d26168a4b16281c9c3ab46519940c628/uuid_utils-0.12.0-cp39-abi3-win_arm64.whl", hash = "sha256:c9bea7c5b2aa6f57937ebebeee4d4ef2baad10f86f1b97b58a3f6f34c14b4e84", size = 182975, upload-time = "2025-12-01T17:29:46.444Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ef/f7/6c55b7722cede3b424df02ed5cddb25c19543abda2f95fa4cfc34a892ae5/uuid_utils-0.12.0-pp311-pypy311_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e2209d361f2996966ab7114f49919eb6aaeabc6041672abbbbf4fdbb8ec1acc0", size = 593065, upload-time = "2025-12-01T17:29:47.507Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b8/40/ce5fe8e9137dbd5570e0016c2584fca43ad81b11a1cef809a1a1b4952ab7/uuid_utils-0.12.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d9636bcdbd6cfcad2b549c352b669412d0d1eb09be72044a2f13e498974863cd", size = 300047, upload-time = "2025-12-01T17:29:48.596Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/9b/31c5d0736d7b118f302c50214e581f40e904305d8872eb0f0c921d50e138/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cd8543a3419251fb78e703ce3b15fdfafe1b7c542cf40caf0775e01db7e7674", size = 335165, upload-time = "2025-12-01T17:29:49.755Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/5c/d80b4d08691c9d7446d0ad58fd41503081a662cfd2c7640faf68c64d8098/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e98db2d8977c052cb307ae1cb5cc37a21715e8d415dbc65863b039397495a013", size = 341437, upload-time = "2025-12-01T17:29:51.112Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/b3/9dccdc6f3c22f6ef5bd381ae559173f8a1ae185ae89ed1f39f499d9d8b02/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8f2bdf5e4ffeb259ef6d15edae92aed60a1d6f07cbfab465d836f6b12b48da8", size = 469123, upload-time = "2025-12-01T17:29:52.389Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/90/6c35ef65fbc49f8189729839b793a4a74a7dd8c5aa5eb56caa93f8c97732/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c3ec53c0cb15e1835870c139317cc5ec06e35aa22843e3ed7d9c74f23f23898", size = 335892, upload-time = "2025-12-01T17:29:53.44Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6b/c7/e3f3ce05c5af2bf86a0938d22165affe635f4dcbfd5687b1dacc042d3e0e/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:84e5c0eba209356f7f389946a3a47b2cc2effd711b3fc7c7f155ad9f7d45e8a3", size = 360693, upload-time = "2025-12-01T17:29:54.558Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -7,11 +7,7 @@ lint_tests: PYTHON_FILES=tests
|
||||
|
||||
lint lint_diff lint_package lint_tests:
|
||||
[ "$(PYTHON_FILES)" = "" ] || uv run --all-groups ruff format $(PYTHON_FILES) --diff
|
||||
@if [ "$(LINT)" != "minimal" ]; then \
|
||||
if [ "$(PYTHON_FILES)" != "" ]; then \
|
||||
uv run --all-groups ruff check $(PYTHON_FILES); \
|
||||
fi; \
|
||||
fi
|
||||
[ "$(PYTHON_FILES)" = "" ] || uv run --all-groups ruff check $(PYTHON_FILES)
|
||||
@if [ "$(LINT)" != "minimal" ]; then \
|
||||
if [ "$(PYTHON_FILES)" != "" ]; then \
|
||||
mkdir -p $(MYPY_CACHE) && uv run --all-groups mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE); \
|
||||
|
||||
@@ -2,6 +2,14 @@
|
||||
|
||||
from deepagents.graph import create_deep_agent
|
||||
from deepagents.middleware.filesystem import FilesystemMiddleware
|
||||
from deepagents.middleware.memory import MemoryMiddleware
|
||||
from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware
|
||||
|
||||
__all__ = ["CompiledSubAgent", "FilesystemMiddleware", "SubAgent", "SubAgentMiddleware", "create_deep_agent"]
|
||||
__all__ = [
|
||||
"CompiledSubAgent",
|
||||
"FilesystemMiddleware",
|
||||
"MemoryMiddleware",
|
||||
"SubAgent",
|
||||
"SubAgentMiddleware",
|
||||
"create_deep_agent",
|
||||
]
|
||||
|
||||
@@ -1,4 +1,22 @@
|
||||
"""CompositeBackend: 경로 접두사(prefix)를 기반으로 작업을 다른 백엔드로 라우팅합니다."""
|
||||
"""Composite backend that routes file operations by path prefix.
|
||||
|
||||
Routes operations to different backends based on path prefixes. Use this when you
|
||||
need different storage strategies for different paths (e.g., state for temp files,
|
||||
persistent store for memories).
|
||||
|
||||
Examples:
|
||||
```python
|
||||
from deepagents.backends.composite import CompositeBackend
|
||||
from deepagents.backends.state import StateBackend
|
||||
from deepagents.backends.store import StoreBackend
|
||||
|
||||
runtime = make_runtime()
|
||||
composite = CompositeBackend(default=StateBackend(runtime), routes={"/memories/": StoreBackend(runtime)})
|
||||
|
||||
composite.write("/temp.txt", "ephemeral")
|
||||
composite.write("/memories/note.md", "persistent")
|
||||
```
|
||||
"""
|
||||
|
||||
from collections import defaultdict
|
||||
|
||||
@@ -16,12 +34,38 @@ from deepagents.backends.protocol import (
|
||||
from deepagents.backends.state import StateBackend
|
||||
|
||||
|
||||
class CompositeBackend:
|
||||
class CompositeBackend(BackendProtocol):
|
||||
"""Routes file operations to different backends by path prefix.
|
||||
|
||||
Matches paths against route prefixes (longest first) and delegates to the
|
||||
corresponding backend. Unmatched paths use the default backend.
|
||||
|
||||
Attributes:
|
||||
default: Backend for paths that don't match any route.
|
||||
routes: Map of path prefixes to backends (e.g., {"/memories/": store_backend}).
|
||||
sorted_routes: Routes sorted by length (longest first) for correct matching.
|
||||
|
||||
Examples:
|
||||
```python
|
||||
composite = CompositeBackend(default=StateBackend(runtime), routes={"/memories/": StoreBackend(runtime), "/cache/": StoreBackend(runtime)})
|
||||
|
||||
composite.write("/temp.txt", "data")
|
||||
composite.write("/memories/note.txt", "data")
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
default: BackendProtocol | StateBackend,
|
||||
routes: dict[str, BackendProtocol],
|
||||
) -> None:
|
||||
"""Initialize composite backend.
|
||||
|
||||
Args:
|
||||
default: Backend for paths that don't match any route.
|
||||
routes: Map of path prefixes to backends. Prefixes must start with "/"
|
||||
and should end with "/" (e.g., "/memories/").
|
||||
"""
|
||||
# Default backend
|
||||
self.default = default
|
||||
|
||||
@@ -32,14 +76,14 @@ class CompositeBackend:
|
||||
self.sorted_routes = sorted(routes.items(), key=lambda x: len(x[0]), reverse=True)
|
||||
|
||||
def _get_backend_and_key(self, key: str) -> tuple[BackendProtocol, str]:
|
||||
"""어떤 백엔드가 이 키를 처리하는지 결정하고 접두사를 제거합니다.
|
||||
"""Get backend for path and strip route prefix.
|
||||
|
||||
Args:
|
||||
key: 원본 파일 경로
|
||||
key: File path to route.
|
||||
|
||||
Returns:
|
||||
(backend, stripped_key) 튜플. stripped_key는 라우트 접두사가
|
||||
제거된 상태입니다 (하지만 선행 슬래시는 유지됨).
|
||||
Tuple of (backend, stripped_path). The stripped path has the route
|
||||
prefix removed but keeps the leading slash.
|
||||
"""
|
||||
# Check routes in order of length (longest first)
|
||||
for prefix, backend in self.sorted_routes:
|
||||
@@ -53,14 +97,23 @@ class CompositeBackend:
|
||||
return self.default, key
|
||||
|
||||
def ls_info(self, path: str) -> list[FileInfo]:
|
||||
"""지정된 디렉토리의 파일과 디렉토리를 나열합니다 (비재귀적).
|
||||
"""List directory contents (non-recursive).
|
||||
|
||||
If path matches a route, lists only that backend. If path is "/", aggregates
|
||||
default backend plus virtual route directories. Otherwise lists default backend.
|
||||
|
||||
Args:
|
||||
path: 디렉토리의 절대 경로.
|
||||
path: Absolute directory path starting with "/".
|
||||
|
||||
Returns:
|
||||
디렉토리 바로 아래에 있는 파일 및 디렉토리에 대한 FileInfo 유사 dict 목록 (라우트 접두사 추가됨).
|
||||
디렉토리는 경로 끝에 /가 붙으며 is_dir=True입니다.
|
||||
List of FileInfo dicts. Directories have trailing "/" and is_dir=True.
|
||||
Route prefixes are restored in returned paths.
|
||||
|
||||
Examples:
|
||||
```python
|
||||
infos = composite.ls_info("/")
|
||||
infos = composite.ls_info("/memories/")
|
||||
```
|
||||
"""
|
||||
# Check if path matches a specific route
|
||||
for route_prefix, backend in self.sorted_routes:
|
||||
@@ -82,12 +135,14 @@ class CompositeBackend:
|
||||
results.extend(self.default.ls_info(path))
|
||||
for route_prefix, backend in self.sorted_routes:
|
||||
# Add the route itself as a directory (e.g., /memories/)
|
||||
results.append({
|
||||
"path": route_prefix,
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": "",
|
||||
})
|
||||
results.append(
|
||||
{
|
||||
"path": route_prefix,
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": "",
|
||||
}
|
||||
)
|
||||
|
||||
results.sort(key=lambda x: x.get("path", ""))
|
||||
return results
|
||||
@@ -96,7 +151,7 @@ class CompositeBackend:
|
||||
return self.default.ls_info(path)
|
||||
|
||||
async def als_info(self, path: str) -> list[FileInfo]:
|
||||
"""ls_info의 비동기 버전입니다."""
|
||||
"""Async version of ls_info."""
|
||||
# Check if path matches a specific route
|
||||
for route_prefix, backend in self.sorted_routes:
|
||||
if path.startswith(route_prefix.rstrip("/")):
|
||||
@@ -117,12 +172,14 @@ class CompositeBackend:
|
||||
results.extend(await self.default.als_info(path))
|
||||
for route_prefix, backend in self.sorted_routes:
|
||||
# Add the route itself as a directory (e.g., /memories/)
|
||||
results.append({
|
||||
"path": route_prefix,
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": "",
|
||||
})
|
||||
results.append(
|
||||
{
|
||||
"path": route_prefix,
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": "",
|
||||
}
|
||||
)
|
||||
|
||||
results.sort(key=lambda x: x.get("path", ""))
|
||||
return results
|
||||
@@ -136,15 +193,15 @@ class CompositeBackend:
|
||||
offset: int = 0,
|
||||
limit: int = 2000,
|
||||
) -> str:
|
||||
"""적절한 백엔드로 라우팅하여 파일 내용을 읽습니다.
|
||||
"""Read file content, routing to appropriate backend.
|
||||
|
||||
Args:
|
||||
file_path: 파일 절대 경로.
|
||||
offset: 읽기 시작할 라인 오프셋 (0부터 시작).
|
||||
limit: 읽을 최대 라인 수.
|
||||
file_path: Absolute file path.
|
||||
offset: Line offset to start reading from (0-indexed).
|
||||
limit: Maximum number of lines to read.
|
||||
|
||||
Returns:
|
||||
라인 번호가 포함된 형식화된 파일 내용, 또는 에러 메시지.
|
||||
Formatted file content with line numbers, or error message.
|
||||
"""
|
||||
backend, stripped_key = self._get_backend_and_key(file_path)
|
||||
return backend.read(stripped_key, offset=offset, limit=limit)
|
||||
@@ -155,7 +212,7 @@ class CompositeBackend:
|
||||
offset: int = 0,
|
||||
limit: int = 2000,
|
||||
) -> str:
|
||||
"""read의 비동기 버전입니다."""
|
||||
"""Async version of read."""
|
||||
backend, stripped_key = self._get_backend_and_key(file_path)
|
||||
return await backend.aread(stripped_key, offset=offset, limit=limit)
|
||||
|
||||
@@ -165,6 +222,28 @@ class CompositeBackend:
|
||||
path: str | None = None,
|
||||
glob: str | None = None,
|
||||
) -> list[GrepMatch] | str:
|
||||
"""Search files for regex pattern.
|
||||
|
||||
Routes to backends based on path: specific route searches one backend,
|
||||
"/" or None searches all backends, otherwise searches default backend.
|
||||
|
||||
Args:
|
||||
pattern: Regex pattern to search for.
|
||||
path: Directory to search. None searches all backends.
|
||||
glob: Glob pattern to filter files (e.g., "*.py", "**/*.txt").
|
||||
Filters by filename, not content.
|
||||
|
||||
Returns:
|
||||
List of GrepMatch dicts with path (route prefix restored), line
|
||||
(1-indexed), and text. Returns error string on failure.
|
||||
|
||||
Examples:
|
||||
```python
|
||||
matches = composite.grep_raw("TODO", path="/memories/")
|
||||
matches = composite.grep_raw("error", path="/")
|
||||
matches = composite.grep_raw("import", path="/", glob="*.py")
|
||||
```
|
||||
"""
|
||||
# If path targets a specific route, search only that backend
|
||||
for route_prefix, backend in self.sorted_routes:
|
||||
if path is not None and path.startswith(route_prefix.rstrip("/")):
|
||||
@@ -174,22 +253,26 @@ class CompositeBackend:
|
||||
return raw
|
||||
return [{**m, "path": f"{route_prefix[:-1]}{m['path']}"} for m in raw]
|
||||
|
||||
# Otherwise, search default and all routed backends and merge
|
||||
all_matches: list[GrepMatch] = []
|
||||
raw_default = self.default.grep_raw(pattern, path, glob) # type: ignore[attr-defined]
|
||||
if isinstance(raw_default, str):
|
||||
# This happens if error occurs
|
||||
return raw_default
|
||||
all_matches.extend(raw_default)
|
||||
|
||||
for route_prefix, backend in self.routes.items():
|
||||
raw = backend.grep_raw(pattern, "/", glob)
|
||||
if isinstance(raw, str):
|
||||
# If path is None or "/", search default and all routed backends and merge
|
||||
# Otherwise, search only the default backend
|
||||
if path is None or path == "/":
|
||||
all_matches: list[GrepMatch] = []
|
||||
raw_default = self.default.grep_raw(pattern, path, glob) # type: ignore[attr-defined]
|
||||
if isinstance(raw_default, str):
|
||||
# This happens if error occurs
|
||||
return raw
|
||||
all_matches.extend({**m, "path": f"{route_prefix[:-1]}{m['path']}"} for m in raw)
|
||||
return raw_default
|
||||
all_matches.extend(raw_default)
|
||||
|
||||
return all_matches
|
||||
for route_prefix, backend in self.routes.items():
|
||||
raw = backend.grep_raw(pattern, "/", glob)
|
||||
if isinstance(raw, str):
|
||||
# This happens if error occurs
|
||||
return raw
|
||||
all_matches.extend({**m, "path": f"{route_prefix[:-1]}{m['path']}"} for m in raw)
|
||||
|
||||
return all_matches
|
||||
# Path specified but doesn't match a route - search only default
|
||||
return self.default.grep_raw(pattern, path, glob) # type: ignore[attr-defined]
|
||||
|
||||
async def agrep_raw(
|
||||
self,
|
||||
@@ -197,7 +280,10 @@ class CompositeBackend:
|
||||
path: str | None = None,
|
||||
glob: str | None = None,
|
||||
) -> list[GrepMatch] | str:
|
||||
"""grep_raw의 비동기 버전입니다."""
|
||||
"""Async version of grep_raw.
|
||||
|
||||
See grep_raw() for detailed documentation on routing behavior and parameters.
|
||||
"""
|
||||
# If path targets a specific route, search only that backend
|
||||
for route_prefix, backend in self.sorted_routes:
|
||||
if path is not None and path.startswith(route_prefix.rstrip("/")):
|
||||
@@ -207,22 +293,26 @@ class CompositeBackend:
|
||||
return raw
|
||||
return [{**m, "path": f"{route_prefix[:-1]}{m['path']}"} for m in raw]
|
||||
|
||||
# Otherwise, search default and all routed backends and merge
|
||||
all_matches: list[GrepMatch] = []
|
||||
raw_default = await self.default.agrep_raw(pattern, path, glob) # type: ignore[attr-defined]
|
||||
if isinstance(raw_default, str):
|
||||
# This happens if error occurs
|
||||
return raw_default
|
||||
all_matches.extend(raw_default)
|
||||
|
||||
for route_prefix, backend in self.routes.items():
|
||||
raw = await backend.agrep_raw(pattern, "/", glob)
|
||||
if isinstance(raw, str):
|
||||
# If path is None or "/", search default and all routed backends and merge
|
||||
# Otherwise, search only the default backend
|
||||
if path is None or path == "/":
|
||||
all_matches: list[GrepMatch] = []
|
||||
raw_default = await self.default.agrep_raw(pattern, path, glob) # type: ignore[attr-defined]
|
||||
if isinstance(raw_default, str):
|
||||
# This happens if error occurs
|
||||
return raw
|
||||
all_matches.extend({**m, "path": f"{route_prefix[:-1]}{m['path']}"} for m in raw)
|
||||
return raw_default
|
||||
all_matches.extend(raw_default)
|
||||
|
||||
return all_matches
|
||||
for route_prefix, backend in self.routes.items():
|
||||
raw = await backend.agrep_raw(pattern, "/", glob)
|
||||
if isinstance(raw, str):
|
||||
# This happens if error occurs
|
||||
return raw
|
||||
all_matches.extend({**m, "path": f"{route_prefix[:-1]}{m['path']}"} for m in raw)
|
||||
|
||||
return all_matches
|
||||
# Path specified but doesn't match a route - search only default
|
||||
return await self.default.agrep_raw(pattern, path, glob) # type: ignore[attr-defined]
|
||||
|
||||
def glob_info(self, pattern: str, path: str = "/") -> list[FileInfo]:
|
||||
results: list[FileInfo] = []
|
||||
@@ -246,7 +336,7 @@ class CompositeBackend:
|
||||
return results
|
||||
|
||||
async def aglob_info(self, pattern: str, path: str = "/") -> list[FileInfo]:
|
||||
"""glob_info의 비동기 버전입니다."""
|
||||
"""Async version of glob_info."""
|
||||
results: list[FileInfo] = []
|
||||
|
||||
# Route based on path, not pattern
|
||||
@@ -272,14 +362,14 @@ class CompositeBackend:
|
||||
file_path: str,
|
||||
content: str,
|
||||
) -> WriteResult:
|
||||
"""적절한 백엔드로 라우팅하여 새 파일을 생성합니다.
|
||||
"""Create a new file, routing to appropriate backend.
|
||||
|
||||
Args:
|
||||
file_path: 파일 절대 경로.
|
||||
content: 문자열 형태의 파일 내용.
|
||||
file_path: Absolute file path.
|
||||
content: File content as a string.
|
||||
|
||||
Returns:
|
||||
성공 메시지 또는 Command 객체, 또는 파일이 이미 존재하는 경우 에러.
|
||||
Success message or Command object, or error if file already exists.
|
||||
"""
|
||||
backend, stripped_key = self._get_backend_and_key(file_path)
|
||||
res = backend.write(stripped_key, content)
|
||||
@@ -301,7 +391,7 @@ class CompositeBackend:
|
||||
file_path: str,
|
||||
content: str,
|
||||
) -> WriteResult:
|
||||
"""write의 비동기 버전입니다."""
|
||||
"""Async version of write."""
|
||||
backend, stripped_key = self._get_backend_and_key(file_path)
|
||||
res = await backend.awrite(stripped_key, content)
|
||||
# If this is a state-backed update and default has state, merge so listings reflect changes
|
||||
@@ -324,16 +414,16 @@ class CompositeBackend:
|
||||
new_string: str,
|
||||
replace_all: bool = False,
|
||||
) -> EditResult:
|
||||
"""적절한 백엔드로 라우팅하여 파일을 편집합니다.
|
||||
"""Edit a file, routing to appropriate backend.
|
||||
|
||||
Args:
|
||||
file_path: 파일 절대 경로.
|
||||
old_string: 찾아서 교체할 문자열.
|
||||
new_string: 교체할 문자열.
|
||||
replace_all: True인 경우 모든 발생을 교체.
|
||||
file_path: Absolute file path.
|
||||
old_string: String to find and replace.
|
||||
new_string: Replacement string.
|
||||
replace_all: If True, replace all occurrences.
|
||||
|
||||
Returns:
|
||||
성공 메시지 또는 Command 객체, 또는 실패 시 에러 메시지.
|
||||
Success message or Command object, or error message on failure.
|
||||
"""
|
||||
backend, stripped_key = self._get_backend_and_key(file_path)
|
||||
res = backend.edit(stripped_key, old_string, new_string, replace_all=replace_all)
|
||||
@@ -356,7 +446,7 @@ class CompositeBackend:
|
||||
new_string: str,
|
||||
replace_all: bool = False,
|
||||
) -> EditResult:
|
||||
"""edit의 비동기 버전입니다."""
|
||||
"""Async version of edit."""
|
||||
backend, stripped_key = self._get_backend_and_key(file_path)
|
||||
res = await backend.aedit(stripped_key, old_string, new_string, replace_all=replace_all)
|
||||
if res.files_update:
|
||||
@@ -375,19 +465,23 @@ class CompositeBackend:
|
||||
self,
|
||||
command: str,
|
||||
) -> ExecuteResponse:
|
||||
"""기본(default) 백엔드를 통해 명령을 실행합니다.
|
||||
|
||||
실행은 경로에 특정되지 않으므로, 항상 기본 백엔드로 위임됩니다.
|
||||
이 기능이 작동하려면 기본 백엔드가 SandboxBackendProtocol을 구현해야 합니다.
|
||||
"""Execute shell command via default backend.
|
||||
|
||||
Args:
|
||||
command: 실행할 전체 쉘 명령 문자열.
|
||||
command: Shell command to execute.
|
||||
|
||||
Returns:
|
||||
결합된 출력, 종료 코드, 잘림(truncation) 플래그를 포함하는 ExecuteResponse.
|
||||
ExecuteResponse with output, exit code, and truncation flag.
|
||||
|
||||
Raises:
|
||||
NotImplementedError: 기본 백엔드가 실행을 지원하지 않는 경우.
|
||||
NotImplementedError: If default backend doesn't implement SandboxBackendProtocol.
|
||||
|
||||
Examples:
|
||||
```python
|
||||
composite = CompositeBackend(default=FilesystemBackend(root_dir="/tmp"), routes={"/memories/": StoreBackend(runtime)})
|
||||
|
||||
result = composite.execute("ls -la")
|
||||
```
|
||||
"""
|
||||
if isinstance(self.default, SandboxBackendProtocol):
|
||||
return self.default.execute(command)
|
||||
@@ -403,7 +497,7 @@ class CompositeBackend:
|
||||
self,
|
||||
command: str,
|
||||
) -> ExecuteResponse:
|
||||
"""execute의 비동기 버전입니다."""
|
||||
"""Async version of execute."""
|
||||
if isinstance(self.default, SandboxBackendProtocol):
|
||||
return await self.default.aexecute(command)
|
||||
|
||||
@@ -415,17 +509,17 @@ class CompositeBackend:
|
||||
)
|
||||
|
||||
def upload_files(self, files: list[tuple[str, bytes]]) -> list[FileUploadResponse]:
|
||||
"""효율성을 위해 백엔드별로 배치 처리하여 여러 파일을 업로드합니다.
|
||||
"""Upload multiple files, batching by backend for efficiency.
|
||||
|
||||
파일을 대상 백엔드별로 그룹화하고, 각 백엔드의 upload_files를
|
||||
해당 백엔드의 모든 파일과 함께 한 번 호출한 다음, 결과를 원래 순서대로 병합합니다.
|
||||
Groups files by their target backend, calls each backend's upload_files
|
||||
once with all files for that backend, then merges results in original order.
|
||||
|
||||
Args:
|
||||
files: 업로드할 (path, content) 튜플의 리스트.
|
||||
files: List of (path, content) tuples to upload.
|
||||
|
||||
Returns:
|
||||
FileUploadResponse 객체들의 리스트. 입력 파일마다 하나씩 반환됩니다.
|
||||
응답 순서는 입력 순서와 일치합니다.
|
||||
List of FileUploadResponse objects, one per input file.
|
||||
Response order matches input order.
|
||||
"""
|
||||
# Pre-allocate result list
|
||||
results: list[FileUploadResponse | None] = [None] * len(files)
|
||||
@@ -458,7 +552,7 @@ class CompositeBackend:
|
||||
return results # type: ignore[return-value]
|
||||
|
||||
async def aupload_files(self, files: list[tuple[str, bytes]]) -> list[FileUploadResponse]:
|
||||
"""upload_files의 비동기 버전입니다."""
|
||||
"""Async version of upload_files."""
|
||||
# Pre-allocate result list
|
||||
results: list[FileUploadResponse | None] = [None] * len(files)
|
||||
|
||||
@@ -488,17 +582,17 @@ class CompositeBackend:
|
||||
return results # type: ignore[return-value]
|
||||
|
||||
def download_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
"""효율성을 위해 백엔드별로 배치 처리하여 여러 파일을 다운로드합니다.
|
||||
"""Download multiple files, batching by backend for efficiency.
|
||||
|
||||
경로를 대상 백엔드별로 그룹화하고, 각 백엔드의 download_files를
|
||||
해당 백엔드의 모든 경로와 함께 한 번 호출한 다음, 결과를 원래 순서대로 병합합니다.
|
||||
Groups paths by their target backend, calls each backend's download_files
|
||||
once with all paths for that backend, then merges results in original order.
|
||||
|
||||
Args:
|
||||
paths: 다운로드할 파일 경로의 리스트.
|
||||
paths: List of file paths to download.
|
||||
|
||||
Returns:
|
||||
FileDownloadResponse 객체들의 리스트. 입력 경로마다 하나씩 반환됩니다.
|
||||
응답 순서는 입력 순서와 일치합니다.
|
||||
List of FileDownloadResponse objects, one per input path.
|
||||
Response order matches input order.
|
||||
"""
|
||||
# Pre-allocate result list
|
||||
results: list[FileDownloadResponse | None] = [None] * len(paths)
|
||||
@@ -528,7 +622,7 @@ class CompositeBackend:
|
||||
return results # type: ignore[return-value]
|
||||
|
||||
async def adownload_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
"""download_files의 비동기 버전입니다."""
|
||||
"""Async version of download_files."""
|
||||
# Pre-allocate result list
|
||||
results: list[FileDownloadResponse | None] = [None] * len(paths)
|
||||
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
"""FilesystemBackend: 파일시스템에서 직접 파일을 읽고 씁니다.
|
||||
"""FilesystemBackend: Read and write files directly from the filesystem.
|
||||
|
||||
보안 및 검색 업그레이드:
|
||||
- virtual_mode일 때 루트 포함(root containment)을 통한 보안 경로 확인 (cwd로 샌드박싱됨)
|
||||
- 가능한 경우 O_NOFOLLOW를 사용하여 파일 I/O 시 심볼릭 링크 따라가기 방지
|
||||
- JSON 파싱을 포함한 Ripgrep 기반 검색과, 가상 경로 동작을 보존하면서
|
||||
정규식 및 선택적 glob 포함 필터링을 지원하는 Python 폴백(fallback) 기능
|
||||
Security and search upgrades:
|
||||
- Secure path resolution with root containment when in virtual_mode (sandboxed to cwd)
|
||||
- Prevent symlink-following on file I/O using O_NOFOLLOW when available
|
||||
- Ripgrep-powered grep with JSON parsing, plus Python fallback with regex
|
||||
and optional glob include filtering, while preserving virtual path behavior
|
||||
"""
|
||||
|
||||
import json
|
||||
@@ -33,11 +33,11 @@ from deepagents.backends.utils import (
|
||||
|
||||
|
||||
class FilesystemBackend(BackendProtocol):
|
||||
"""파일시스템에서 직접 파일을 읽고 쓰는 백엔드.
|
||||
"""Backend that reads and writes files directly from the filesystem.
|
||||
|
||||
파일은 실제 파일시스템 경로를 사용하여 접근합니다. 상대 경로는
|
||||
현재 작업 디렉토리에 상대적으로 해결(resolve)됩니다. 내용은 일반 텍스트로
|
||||
읽고 쓰이며, 메타데이터(타임스탬프)는 파일시스템 상태(stat)에서 파생됩니다.
|
||||
Files are accessed using their actual filesystem paths. Relative paths are
|
||||
resolved relative to the current working directory. Content is read/written
|
||||
as plain text, and metadata (timestamps) are derived from filesystem stats.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
@@ -46,30 +46,30 @@ class FilesystemBackend(BackendProtocol):
|
||||
virtual_mode: bool = False,
|
||||
max_file_size_mb: int = 10,
|
||||
) -> None:
|
||||
"""파일시스템 백엔드를 초기화합니다.
|
||||
"""Initialize filesystem backend.
|
||||
|
||||
Args:
|
||||
root_dir: 파일 작업을 위한 선택적 루트 디렉토리. 제공된 경우,
|
||||
모든 파일 경로는 이 디렉토리에 상대적으로 해결됩니다.
|
||||
제공되지 않은 경우, 현재 작업 디렉토리를 사용합니다.
|
||||
root_dir: Optional root directory for file operations. If provided,
|
||||
all file paths will be resolved relative to this directory.
|
||||
If not provided, uses the current working directory.
|
||||
"""
|
||||
self.cwd = Path(root_dir).resolve() if root_dir else Path.cwd()
|
||||
self.virtual_mode = virtual_mode
|
||||
self.max_file_size_bytes = max_file_size_mb * 1024 * 1024
|
||||
|
||||
def _resolve_path(self, key: str) -> Path:
|
||||
"""보안 검사를 포함하여 파일 경로를 해결(resolve)합니다.
|
||||
"""Resolve a file path with security checks.
|
||||
|
||||
virtual_mode=True일 때, 들어오는 경로를 self.cwd 하위의 가상 절대 경로로 취급하며,
|
||||
상위 경로 탐색(.., ~)을 허용하지 않고 해결된 경로가 루트 내에 머물도록 보장합니다.
|
||||
virtual_mode=False일 때, 레거시 동작을 유지합니다: 절대 경로는 그대료 허용되고,
|
||||
상대 경로는 cwd 하위로 해결됩니다.
|
||||
When virtual_mode=True, treat incoming paths as virtual absolute paths under
|
||||
self.cwd, disallow traversal (.., ~) and ensure resolved path stays within root.
|
||||
When virtual_mode=False, preserve legacy behavior: absolute paths are allowed
|
||||
as-is; relative paths resolve under cwd.
|
||||
|
||||
Args:
|
||||
key: 파일 경로 (절대, 상대, 또는 virtual_mode=True일 때 가상 경로)
|
||||
key: File path (absolute, relative, or virtual when virtual_mode=True)
|
||||
|
||||
Returns:
|
||||
해결된 절대 Path 객체
|
||||
Resolved absolute Path object
|
||||
"""
|
||||
if self.virtual_mode:
|
||||
vpath = key if key.startswith("/") else "/" + key
|
||||
@@ -88,14 +88,14 @@ class FilesystemBackend(BackendProtocol):
|
||||
return (self.cwd / path).resolve()
|
||||
|
||||
def ls_info(self, path: str) -> list[FileInfo]:
|
||||
"""지정된 디렉토리의 파일과 디렉토리를 나열합니다 (비재귀적).
|
||||
"""List files and directories in the specified directory (non-recursive).
|
||||
|
||||
Args:
|
||||
path: 파일 목록을 가져올 절대 디렉토리 경로.
|
||||
path: Absolute directory path to list files from.
|
||||
|
||||
Returns:
|
||||
디렉토리 바로 아래에 있는 파일 및 디렉토리에 대한 FileInfo 유사 dict 목록.
|
||||
디렉토리는 경로 끝에 /가 붙으며 is_dir=True입니다.
|
||||
List of FileInfo-like dicts for files and directories directly in the directory.
|
||||
Directories have a trailing / in their path and is_dir=True.
|
||||
"""
|
||||
dir_path = self._resolve_path(path)
|
||||
if not dir_path.exists() or not dir_path.is_dir():
|
||||
@@ -124,23 +124,27 @@ class FilesystemBackend(BackendProtocol):
|
||||
if is_file:
|
||||
try:
|
||||
st = child_path.stat()
|
||||
results.append({
|
||||
"path": abs_path,
|
||||
"is_dir": False,
|
||||
"size": int(st.st_size),
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
})
|
||||
results.append(
|
||||
{
|
||||
"path": abs_path,
|
||||
"is_dir": False,
|
||||
"size": int(st.st_size),
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
}
|
||||
)
|
||||
except OSError:
|
||||
results.append({"path": abs_path, "is_dir": False})
|
||||
elif is_dir:
|
||||
try:
|
||||
st = child_path.stat()
|
||||
results.append({
|
||||
"path": abs_path + "/",
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
})
|
||||
results.append(
|
||||
{
|
||||
"path": abs_path + "/",
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
}
|
||||
)
|
||||
except OSError:
|
||||
results.append({"path": abs_path + "/", "is_dir": True})
|
||||
else:
|
||||
@@ -159,23 +163,27 @@ class FilesystemBackend(BackendProtocol):
|
||||
if is_file:
|
||||
try:
|
||||
st = child_path.stat()
|
||||
results.append({
|
||||
"path": virt_path,
|
||||
"is_dir": False,
|
||||
"size": int(st.st_size),
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
})
|
||||
results.append(
|
||||
{
|
||||
"path": virt_path,
|
||||
"is_dir": False,
|
||||
"size": int(st.st_size),
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
}
|
||||
)
|
||||
except OSError:
|
||||
results.append({"path": virt_path, "is_dir": False})
|
||||
elif is_dir:
|
||||
try:
|
||||
st = child_path.stat()
|
||||
results.append({
|
||||
"path": virt_path + "/",
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
})
|
||||
results.append(
|
||||
{
|
||||
"path": virt_path + "/",
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
}
|
||||
)
|
||||
except OSError:
|
||||
results.append({"path": virt_path + "/", "is_dir": True})
|
||||
except (OSError, PermissionError):
|
||||
@@ -191,15 +199,15 @@ class FilesystemBackend(BackendProtocol):
|
||||
offset: int = 0,
|
||||
limit: int = 2000,
|
||||
) -> str:
|
||||
"""파일 내용을 라인 번호와 함께 읽습니다.
|
||||
"""Read file content with line numbers.
|
||||
|
||||
Args:
|
||||
file_path: 절대 또는 상대 파일 경로.
|
||||
offset: 읽기 시작할 라인 오프셋 (0부터 시작).
|
||||
limit: 읽을 최대 라인 수.
|
||||
file_path: Absolute or relative file path.
|
||||
offset: Line offset to start reading from (0-indexed).
|
||||
limit: Maximum number of lines to read.
|
||||
|
||||
Returns:
|
||||
라인 번호가 포함된 형식화된 파일 내용, 또는 에러 메시지.
|
||||
Formatted file content with line numbers, or error message.
|
||||
"""
|
||||
resolved_path = self._resolve_path(file_path)
|
||||
|
||||
@@ -233,15 +241,13 @@ class FilesystemBackend(BackendProtocol):
|
||||
file_path: str,
|
||||
content: str,
|
||||
) -> WriteResult:
|
||||
"""내용을 포함하는 새 파일을 생성합니다.
|
||||
WriteResult를 반환합니다. 외부 저장소는 files_update=None을 설정합니다.
|
||||
"""Create a new file with content.
|
||||
Returns WriteResult. External storage sets files_update=None.
|
||||
"""
|
||||
resolved_path = self._resolve_path(file_path)
|
||||
|
||||
if resolved_path.exists():
|
||||
return WriteResult(
|
||||
error=f"Cannot write to {file_path} because it already exists. Read and then make an edit, or write to a new path."
|
||||
)
|
||||
return WriteResult(error=f"Cannot write to {file_path} because it already exists. Read and then make an edit, or write to a new path.")
|
||||
|
||||
try:
|
||||
# Create parent directories if needed
|
||||
@@ -266,8 +272,8 @@ class FilesystemBackend(BackendProtocol):
|
||||
new_string: str,
|
||||
replace_all: bool = False,
|
||||
) -> EditResult:
|
||||
"""문자열 발생(occurrences)을 교체하여 파일을 편집합니다.
|
||||
EditResult를 반환합니다. 외부 저장소는 files_update=None을 설정합니다.
|
||||
"""Edit a file by replacing string occurrences.
|
||||
Returns EditResult. External storage sets files_update=None.
|
||||
"""
|
||||
resolved_path = self._resolve_path(file_path)
|
||||
|
||||
@@ -331,9 +337,7 @@ class FilesystemBackend(BackendProtocol):
|
||||
matches.append({"path": fpath, "line": int(line_num), "text": line_text})
|
||||
return matches
|
||||
|
||||
def _ripgrep_search(
|
||||
self, pattern: str, base_full: Path, include_glob: str | None
|
||||
) -> dict[str, list[tuple[int, str]]] | None:
|
||||
def _ripgrep_search(self, pattern: str, base_full: Path, include_glob: str | None) -> dict[str, list[tuple[int, str]]] | None:
|
||||
cmd = ["rg", "--json"]
|
||||
if include_glob:
|
||||
cmd.extend(["--glob", include_glob])
|
||||
@@ -378,9 +382,7 @@ class FilesystemBackend(BackendProtocol):
|
||||
|
||||
return results
|
||||
|
||||
def _python_search(
|
||||
self, pattern: str, base_full: Path, include_glob: str | None
|
||||
) -> dict[str, list[tuple[int, str]]]:
|
||||
def _python_search(self, pattern: str, base_full: Path, include_glob: str | None) -> dict[str, list[tuple[int, str]]]:
|
||||
try:
|
||||
regex = re.compile(pattern)
|
||||
except re.error:
|
||||
@@ -438,12 +440,14 @@ class FilesystemBackend(BackendProtocol):
|
||||
if not self.virtual_mode:
|
||||
try:
|
||||
st = matched_path.stat()
|
||||
results.append({
|
||||
"path": abs_path,
|
||||
"is_dir": False,
|
||||
"size": int(st.st_size),
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
})
|
||||
results.append(
|
||||
{
|
||||
"path": abs_path,
|
||||
"is_dir": False,
|
||||
"size": int(st.st_size),
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
}
|
||||
)
|
||||
except OSError:
|
||||
results.append({"path": abs_path, "is_dir": False})
|
||||
else:
|
||||
@@ -459,12 +463,14 @@ class FilesystemBackend(BackendProtocol):
|
||||
virt = "/" + relative_path
|
||||
try:
|
||||
st = matched_path.stat()
|
||||
results.append({
|
||||
"path": virt,
|
||||
"is_dir": False,
|
||||
"size": int(st.st_size),
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
})
|
||||
results.append(
|
||||
{
|
||||
"path": virt,
|
||||
"is_dir": False,
|
||||
"size": int(st.st_size),
|
||||
"modified_at": datetime.fromtimestamp(st.st_mtime).isoformat(),
|
||||
}
|
||||
)
|
||||
except OSError:
|
||||
results.append({"path": virt, "is_dir": False})
|
||||
except (OSError, ValueError):
|
||||
@@ -474,14 +480,14 @@ class FilesystemBackend(BackendProtocol):
|
||||
return results
|
||||
|
||||
def upload_files(self, files: list[tuple[str, bytes]]) -> list[FileUploadResponse]:
|
||||
"""파일시스템에 여러 파일을 업로드합니다.
|
||||
"""Upload multiple files to the filesystem.
|
||||
|
||||
Args:
|
||||
files: 내용이 bytes인 (path, content) 튜플의 리스트.
|
||||
files: List of (path, content) tuples where content is bytes.
|
||||
|
||||
Returns:
|
||||
FileUploadResponse 객체들의 리스트. 입력 파일마다 하나씩 반환됩니다.
|
||||
응답 순서는 입력 순서와 일치합니다.
|
||||
List of FileUploadResponse objects, one per input file.
|
||||
Response order matches input order.
|
||||
"""
|
||||
responses: list[FileUploadResponse] = []
|
||||
for path, content in files:
|
||||
@@ -514,13 +520,13 @@ class FilesystemBackend(BackendProtocol):
|
||||
return responses
|
||||
|
||||
def download_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
"""파일시스템에서 여러 파일을 다운로드합니다.
|
||||
"""Download multiple files from the filesystem.
|
||||
|
||||
Args:
|
||||
paths: 다운로드할 파일 경로의 리스트.
|
||||
paths: List of file paths to download.
|
||||
|
||||
Returns:
|
||||
FileDownloadResponse 객체들의 리스트. 입력 경로마다 하나씩 반환됩니다.
|
||||
List of FileDownloadResponse objects, one per input path.
|
||||
"""
|
||||
responses: list[FileDownloadResponse] = []
|
||||
for path in paths:
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
"""플러그형 메모리 백엔드를 위한 프로토콜 정의.
|
||||
"""Protocol definition for pluggable memory backends.
|
||||
|
||||
이 모듈은 모든 백엔드 구현이 따라야 하는 BackendProtocol을 정의합니다.
|
||||
백엔드는 파일들을 서로 다른 위치(state, filesystem, database 등)에 저장할 수 있으며,
|
||||
파일 작업에 대해 통일된 인터페이스를 제공합니다.
|
||||
This module defines the BackendProtocol that all backend implementations
|
||||
must follow. Backends can store files in different locations (state, filesystem,
|
||||
database, etc.) and provide a uniform interface for file operations.
|
||||
"""
|
||||
|
||||
import abc
|
||||
@@ -20,31 +20,32 @@ FileOperationError = Literal[
|
||||
"is_directory", # Download: tried to download directory as file
|
||||
"invalid_path", # Both: path syntax malformed (parent dir missing, invalid chars)
|
||||
]
|
||||
"""파일 업로드/다운로드 작업을 위한 표준화된 에러 코드.
|
||||
"""Standardized error codes for file upload/download operations.
|
||||
|
||||
이 코드들은 LLM이 이해하고 잠재적으로 수정할 수 있는 일반적인 복구 가능 에러들을 나타냅니다:
|
||||
- file_not_found: 요청한 파일이 존재하지 않음 (다운로드)
|
||||
- parent_not_found: 부모 디렉토리가 존재하지 않음 (업로드)
|
||||
- permission_denied: 작업에 대한 접근이 거부됨
|
||||
- is_directory: 디렉토리를 파일로 다운로드하려고 시도함
|
||||
- invalid_path: 경로 구문이 잘못되었거나 유효하지 않은 문자를 포함함
|
||||
These represent common, recoverable errors that an LLM can understand and potentially fix:
|
||||
- file_not_found: The requested file doesn't exist (download)
|
||||
- parent_not_found: The parent directory doesn't exist (upload)
|
||||
- permission_denied: Access denied for the operation
|
||||
- is_directory: Attempted to download a directory as a file
|
||||
- invalid_path: Path syntax is malformed or contains invalid characters
|
||||
"""
|
||||
|
||||
|
||||
@dataclass
|
||||
class FileDownloadResponse:
|
||||
"""단일 파일 다운로드 작업의 결과.
|
||||
"""Result of a single file download operation.
|
||||
|
||||
이 응답은 일괄 작업에서 부분적인 성공을 허용하도록 설계되었습니다.
|
||||
에러는 LLM이 파일 작업을 수행하는 사용 사례에서 복구 가능한 특정 조건들을 위해
|
||||
FileOperationError 리터럴을 사용하여 표준화되었습니다.
|
||||
The response is designed to allow partial success in batch operations.
|
||||
The errors are standardized using FileOperationError literals
|
||||
for certain recoverable conditions for use cases that involve
|
||||
LLMs performing file operations.
|
||||
|
||||
Attributes:
|
||||
path: 요청된 파일 경로. 일괄 결과를 처리할 때 쉬운 상호참조를 위해 포함됩니다.
|
||||
에러 메시지에 특히 유용합니다.
|
||||
content: 성공 시 파일 내용(bytes), 실패 시 None.
|
||||
error: 실패 시 표준화된 에러 코드, 성공 시 None.
|
||||
구조화되고 LLM이 조치 가능한 에러 보고를 위해 FileOperationError 리터럴을 사용합니다.
|
||||
path: The file path that was requested. Included for easy correlation
|
||||
when processing batch results, especially useful for error messages.
|
||||
content: File contents as bytes on success, None on failure.
|
||||
error: Standardized error code on failure, None on success.
|
||||
Uses FileOperationError literal for structured, LLM-actionable error reporting.
|
||||
|
||||
Examples:
|
||||
>>> # Success
|
||||
@@ -60,16 +61,18 @@ class FileDownloadResponse:
|
||||
|
||||
@dataclass
|
||||
class FileUploadResponse:
|
||||
"""단일 파일 업로드 작업의 결과.
|
||||
"""Result of a single file upload operation.
|
||||
|
||||
이 응답은 일괄 작업에서 부분적인 성공을 허용하도록 설계되었습니다.
|
||||
에러는 LLM이 파일 작업을 수행하는 사용 사례에서 복구 가능한 특정 조건들을 위해
|
||||
FileOperationError 리터럴을 사용하여 표준화되었습니다.
|
||||
The response is designed to allow partial success in batch operations.
|
||||
The errors are standardized using FileOperationError literals
|
||||
for certain recoverable conditions for use cases that involve
|
||||
LLMs performing file operations.
|
||||
|
||||
Attributes:
|
||||
path: 요청된 파일 경로. 일괄 결과를 처리할 때 쉬운 상호참조와 명확한 에러 메시지를 위해 포함됩니다.
|
||||
error: 실패 시 표준화된 에러 코드, 성공 시 None.
|
||||
구조화되고 LLM이 조치 가능한 에러 보고를 위해 FileOperationError 리터럴을 사용합니다.
|
||||
path: The file path that was requested. Included for easy correlation
|
||||
when processing batch results and for clear error messages.
|
||||
error: Standardized error code on failure, None on success.
|
||||
Uses FileOperationError literal for structured, LLM-actionable error reporting.
|
||||
|
||||
Examples:
|
||||
>>> # Success
|
||||
@@ -83,10 +86,10 @@ class FileUploadResponse:
|
||||
|
||||
|
||||
class FileInfo(TypedDict):
|
||||
"""구조화된 파일 목록 정보.
|
||||
"""Structured file listing info.
|
||||
|
||||
백엔드 전반에서 사용되는 최소한의 계약입니다. "path"만 필수입니다.
|
||||
다른 필드들은 최선의 노력(best-effort)으로 제공되며 백엔드에 따라 없을 수 있습니다.
|
||||
Minimal contract used across backends. Only "path" is required.
|
||||
Other fields are best-effort and may be absent depending on backend.
|
||||
"""
|
||||
|
||||
path: str
|
||||
@@ -96,7 +99,7 @@ class FileInfo(TypedDict):
|
||||
|
||||
|
||||
class GrepMatch(TypedDict):
|
||||
"""구조화된 grep 일치(match) 항목."""
|
||||
"""Structured grep match entry."""
|
||||
|
||||
path: str
|
||||
line: int
|
||||
@@ -105,14 +108,14 @@ class GrepMatch(TypedDict):
|
||||
|
||||
@dataclass
|
||||
class WriteResult:
|
||||
"""백엔드 쓰기(write) 작업의 결과.
|
||||
"""Result from backend write operations.
|
||||
|
||||
Attributes:
|
||||
error: 실패 시 에러 메시지, 성공 시 None.
|
||||
path: 작성된 파일의 절대 경로, 실패 시 None.
|
||||
files_update: 체크포인트 백엔드를 위한 상태 업데이트 dict, 외부 저장소인 경우 None.
|
||||
체크포인트 백엔드는 이를 LangGraph 상태를 위한 {file_path: file_data}로 채웁니다.
|
||||
외부 백엔드는 None으로 설정합니다 (이미 디스크/S3/데이터베이스 등에 영구 저장됨).
|
||||
error: Error message on failure, None on success.
|
||||
path: Absolute path of written file, None on failure.
|
||||
files_update: State update dict for checkpoint backends, None for external storage.
|
||||
Checkpoint backends populate this with {file_path: file_data} for LangGraph state.
|
||||
External backends set None (already persisted to disk/S3/database/etc).
|
||||
|
||||
Examples:
|
||||
>>> # Checkpoint storage
|
||||
@@ -130,15 +133,15 @@ class WriteResult:
|
||||
|
||||
@dataclass
|
||||
class EditResult:
|
||||
"""백엔드 편집(edit) 작업의 결과.
|
||||
"""Result from backend edit operations.
|
||||
|
||||
Attributes:
|
||||
error: 실패 시 에러 메시지, 성공 시 None.
|
||||
path: 편집된 파일의 절대 경로, 실패 시 None.
|
||||
files_update: 체크포인트 백엔드를 위한 상태 업데이트 dict, 외부 저장소인 경우 None.
|
||||
체크포인트 백엔드는 이를 LangGraph 상태를 위한 {file_path: file_data}로 채웁니다.
|
||||
외부 백엔드는 None으로 설정합니다 (이미 디스크/S3/데이터베이스 등에 영구 저장됨).
|
||||
occurrences: 교체된 횟수, 실패 시 None.
|
||||
error: Error message on failure, None on success.
|
||||
path: Absolute path of edited file, None on failure.
|
||||
files_update: State update dict for checkpoint backends, None for external storage.
|
||||
Checkpoint backends populate this with {file_path: file_data} for LangGraph state.
|
||||
External backends set None (already persisted to disk/S3/database/etc).
|
||||
occurrences: Number of replacements made, None on failure.
|
||||
|
||||
Examples:
|
||||
>>> # Checkpoint storage
|
||||
@@ -156,32 +159,32 @@ class EditResult:
|
||||
|
||||
|
||||
class BackendProtocol(abc.ABC):
|
||||
"""플러그형 메모리 백엔드를 위한 프로토콜 (단일 통일 인터페이스).
|
||||
"""Protocol for pluggable memory backends (single, unified).
|
||||
|
||||
백엔드는 파일들을 다양한 위치(state, filesystem, database 등)에 저장할 수 있으며,
|
||||
파일 작업에 대해 통일된 인터페이스를 제공합니다.
|
||||
Backends can store files in different locations (state, filesystem, database, etc.)
|
||||
and provide a uniform interface for file operations.
|
||||
|
||||
모든 파일 데이터는 다음 구조를 가진 딕셔너리로 표현됩니다:
|
||||
All file data is represented as dicts with the following structure:
|
||||
{
|
||||
"content": list[str], # 텍스트 내용의 라인 리스트
|
||||
"created_at": str, # ISO 형식 타임스탬프
|
||||
"modified_at": str, # ISO 형식 타임스탬프
|
||||
"content": list[str], # Lines of text content
|
||||
"created_at": str, # ISO format timestamp
|
||||
"modified_at": str, # ISO format timestamp
|
||||
}
|
||||
"""
|
||||
|
||||
def ls_info(self, path: str) -> list["FileInfo"]:
|
||||
"""디렉토리 내의 모든 파일과 메타데이터를 나열합니다.
|
||||
"""List all files in a directory with metadata.
|
||||
|
||||
Args:
|
||||
path: 목록을 조회할 디렉토리의 절대 경로. '/'로 시작해야 합니다.
|
||||
path: Absolute path to the directory to list. Must start with '/'.
|
||||
|
||||
Returns:
|
||||
파일 메타데이터를 포함하는 FileInfo 딕셔너리의 리스트:
|
||||
List of FileInfo dicts containing file metadata:
|
||||
|
||||
- `path` (필수): 절대 파일 경로
|
||||
- `is_dir` (선택): 디렉토리인 경우 True
|
||||
- `size` (선택): 바이트 단위 파일 크기
|
||||
- `modified_at` (선택): ISO 8601 타임스탬프
|
||||
- `path` (required): Absolute file path
|
||||
- `is_dir` (optional): True if directory
|
||||
- `size` (optional): File size in bytes
|
||||
- `modified_at` (optional): ISO 8601 timestamp
|
||||
"""
|
||||
|
||||
async def als_info(self, path: str) -> list["FileInfo"]:
|
||||
@@ -194,25 +197,25 @@ class BackendProtocol(abc.ABC):
|
||||
offset: int = 0,
|
||||
limit: int = 2000,
|
||||
) -> str:
|
||||
"""파일 내용을 라인 번호와 함께 읽습니다.
|
||||
"""Read file content with line numbers.
|
||||
|
||||
Args:
|
||||
file_path: 읽을 파일의 절대 경로. '/'로 시작해야 합니다.
|
||||
offset: 읽기 시작할 라인 번호 (0부터 시작). 기본값: 0.
|
||||
limit: 읽을 최대 라인 수. 기본값: 2000.
|
||||
file_path: Absolute path to the file to read. Must start with '/'.
|
||||
offset: Line number to start reading from (0-indexed). Default: 0.
|
||||
limit: Maximum number of lines to read. Default: 2000.
|
||||
|
||||
Returns:
|
||||
라인 번호가 포함된 파일 내용 문자열 (cat -n 형식), 1번 라인부터 시작합니다.
|
||||
2000자를 초과하는 라인은 잘립니다.
|
||||
String containing file content formatted with line numbers (cat -n format),
|
||||
starting at line 1. Lines longer than 2000 characters are truncated.
|
||||
|
||||
파일이 존재하지 않거나 읽을 수 없는 경우 에러 문자열을 반환합니다.
|
||||
Returns an error string if the file doesn't exist or can't be read.
|
||||
|
||||
!!! note
|
||||
- 컨텍스트 오버플로우를 방지하기 위해 대용량 파일에는 페이지네이션(offset/limit)을 사용하세요.
|
||||
- 첫 스캔: `read(path, limit=100)`으로 파일 구조 확인
|
||||
- 추가 읽기: `read(path, offset=100, limit=200)`으로 다음 구간 읽기
|
||||
- ALWAYS read a file before editing it (편집 전 반드시 파일 읽기)
|
||||
- 파일이 존재하지만 비어있는 경우, 시스템 리마인더 경고를 받게 됩니다.
|
||||
- Use pagination (offset/limit) for large files to avoid context overflow
|
||||
- First scan: `read(path, limit=100)` to see file structure
|
||||
- Read more: `read(path, offset=100, limit=200)` for next section
|
||||
- ALWAYS read a file before editing it
|
||||
- If file exists but is empty, you'll receive a system reminder warning
|
||||
"""
|
||||
|
||||
async def aread(
|
||||
@@ -230,38 +233,38 @@ class BackendProtocol(abc.ABC):
|
||||
path: str | None = None,
|
||||
glob: str | None = None,
|
||||
) -> list["GrepMatch"] | str:
|
||||
"""파일들에서 리터럴 텍스트 패턴을 검색합니다.
|
||||
"""Search for a literal text pattern in files.
|
||||
|
||||
Args:
|
||||
pattern: 검색할 리터럴 문자열 (정규식 아님).
|
||||
파일 내용 내에서 정확한 부분 문자열 매칭을 수행합니다.
|
||||
예: "TODO"는 "TODO"를 포함하는 모든 라인과 일치합니다.
|
||||
pattern: Literal string to search for (NOT regex).
|
||||
Performs exact substring matching within file content.
|
||||
Example: "TODO" matches any line containing "TODO"
|
||||
|
||||
path: 검색할 디렉토리 경로 (선택).
|
||||
None인 경우 현재 작업 디렉토리에서 검색합니다.
|
||||
예: "/workspace/src"
|
||||
path: Optional directory path to search in.
|
||||
If None, searches in current working directory.
|
||||
Example: "/workspace/src"
|
||||
|
||||
glob: 검색할 파일을 필터링하기 위한 선택적 glob 패턴.
|
||||
내용이 아닌 파일명/경로로 필터링합니다.
|
||||
표준 glob 와일드카드를 지원합니다:
|
||||
- `*`: 파일명의 모든 문자와 일치
|
||||
- `**`: 모든 디렉토리를 재귀적으로 일치
|
||||
- `?`: 단일 문자와 일치
|
||||
- `[abc]`: 세트 내의 한 문자와 일치
|
||||
glob: Optional glob pattern to filter which FILES to search.
|
||||
Filters by filename/path, not content.
|
||||
Supports standard glob wildcards:
|
||||
- `*` matches any characters in filename
|
||||
- `**` matches any directories recursively
|
||||
- `?` matches single character
|
||||
- `[abc]` matches one character from set
|
||||
|
||||
Examples:
|
||||
- "*.py" - Python 파일만 검색
|
||||
- "**/*.txt" - 모든 .txt 파일을 재귀적으로 검색
|
||||
- "src/**/*.js" - src/ 하위의 JS 파일 검색
|
||||
- "test[0-9].txt" - test0.txt, test1.txt 등을 검색
|
||||
- "*.py" - only search Python files
|
||||
- "**/*.txt" - search all .txt files recursively
|
||||
- "src/**/*.js" - search JS files under src/
|
||||
- "test[0-9].txt" - search test0.txt, test1.txt, etc.
|
||||
|
||||
Returns:
|
||||
성공 시: 다음을 포함하는 구조화된 결과 list[GrepMatch] 반환:
|
||||
- path: 절대 파일 경로
|
||||
- line: 라인 번호 (1부터 시작)
|
||||
- text: 매치를 포함하는 전체 라인 내용
|
||||
On success: list[GrepMatch] with structured results containing:
|
||||
- path: Absolute file path
|
||||
- line: Line number (1-indexed)
|
||||
- text: Full line content containing the match
|
||||
|
||||
실패 시: 에러 메시지 문자열 (예: 잘못된 경로, 권한 거부)
|
||||
On error: str with error message (e.g., invalid path, permission denied)
|
||||
"""
|
||||
|
||||
async def agrep_raw(
|
||||
@@ -274,18 +277,18 @@ class BackendProtocol(abc.ABC):
|
||||
return await asyncio.to_thread(self.grep_raw, pattern, path, glob)
|
||||
|
||||
def glob_info(self, pattern: str, path: str = "/") -> list["FileInfo"]:
|
||||
"""glob 패턴과 일치하는 파일을 찾습니다.
|
||||
"""Find files matching a glob pattern.
|
||||
|
||||
Args:
|
||||
pattern: 파일 경로와 일치시킬 와일드카드가 포함된 Glob 패턴.
|
||||
표준 glob 문법을 지원합니다:
|
||||
- `*` 파일명/디렉토리 내의 모든 문자와 일치
|
||||
- `**` 모든 디렉토리를 재귀적으로 일치
|
||||
- `?` 단일 문자와 일치
|
||||
- `[abc]` 세트 내의 한 문자와 일치
|
||||
pattern: Glob pattern with wildcards to match file paths.
|
||||
Supports standard glob syntax:
|
||||
- `*` matches any characters within a filename/directory
|
||||
- `**` matches any directories recursively
|
||||
- `?` matches a single character
|
||||
- `[abc]` matches one character from set
|
||||
|
||||
path: 검색을 시작할 기본 디렉토리. 기본값: "/" (루트).
|
||||
패턴은 이 경로에 상대적으로 적용됩니다.
|
||||
path: Base directory to search from. Default: "/" (root).
|
||||
The pattern is applied relative to this path.
|
||||
|
||||
Returns:
|
||||
list of FileInfo
|
||||
@@ -300,12 +303,12 @@ class BackendProtocol(abc.ABC):
|
||||
file_path: str,
|
||||
content: str,
|
||||
) -> WriteResult:
|
||||
"""파일시스템 내 새 파일에 내용을 씁니다. 파일이 존재하면 에러가 발생합니다.
|
||||
"""Write content to a new file in the filesystem, error if file exists.
|
||||
|
||||
Args:
|
||||
file_path: 파일이 생성될 절대 경로.
|
||||
'/'로 시작해야 합니다.
|
||||
content: 파일에 쓸 문자열 내용.
|
||||
file_path: Absolute path where the file should be created.
|
||||
Must start with '/'.
|
||||
content: String content to write to the file.
|
||||
|
||||
Returns:
|
||||
WriteResult
|
||||
@@ -326,16 +329,16 @@ class BackendProtocol(abc.ABC):
|
||||
new_string: str,
|
||||
replace_all: bool = False,
|
||||
) -> EditResult:
|
||||
"""기존 파일에서 정확한 문자열 교체를 수행합니다.
|
||||
"""Perform exact string replacements in an existing file.
|
||||
|
||||
Args:
|
||||
file_path: 편집할 파일의 절대 경로. '/'로 시작해야 합니다.
|
||||
old_string: 검색 및 교체할 정확한 문자열.
|
||||
공백과 들여쓰기를 포함하여 정확히 일치해야 합니다.
|
||||
new_string: old_string을 대체할 문자열.
|
||||
old_string과 달라야 합니다.
|
||||
replace_all: True인 경우 모든 발생을 교체합니다. False(기본값)인 경우
|
||||
old_string은 파일 내에서 유일해야 하며, 그렇지 않으면 편집이 실패합니다.
|
||||
file_path: Absolute path to the file to edit. Must start with '/'.
|
||||
old_string: Exact string to search for and replace.
|
||||
Must match exactly including whitespace and indentation.
|
||||
new_string: String to replace old_string with.
|
||||
Must be different from old_string.
|
||||
replace_all: If True, replace all occurrences. If False (default),
|
||||
old_string must be unique in the file or the edit fails.
|
||||
|
||||
Returns:
|
||||
EditResult
|
||||
@@ -352,25 +355,27 @@ class BackendProtocol(abc.ABC):
|
||||
return await asyncio.to_thread(self.edit, file_path, old_string, new_string, replace_all)
|
||||
|
||||
def upload_files(self, files: list[tuple[str, bytes]]) -> list[FileUploadResponse]:
|
||||
"""샌드박스에 여러 파일을 업로드합니다.
|
||||
"""Upload multiple files to the sandbox.
|
||||
|
||||
이 API는 개발자가 직접 사용하거나 커스텀 도구를 통해
|
||||
LLM에게 노출할 수 있도록 설계되었습니다.
|
||||
This API is designed to allow developers to use it either directly or
|
||||
by exposing it to LLMs via custom tools.
|
||||
|
||||
Args:
|
||||
files: 업로드할 (path, content) 튜플의 리스트.
|
||||
files: List of (path, content) tuples to upload.
|
||||
|
||||
Returns:
|
||||
FileUploadResponse 객체들의 리스트. 입력 파일마다 하나씩 반환됩니다.
|
||||
응답 순서는 입력 순서와 일치합니다 (files[i]에 대해 response[i]).
|
||||
파일별 성공/실패 여부는 error 필드를 확인하십시오.
|
||||
List of FileUploadResponse objects, one per input file.
|
||||
Response order matches input order (response[i] for files[i]).
|
||||
Check the error field to determine success/failure per file.
|
||||
|
||||
Examples:
|
||||
```python
|
||||
responses = sandbox.upload_files([
|
||||
("/app/config.json", b"{...}"),
|
||||
("/app/data.txt", b"content"),
|
||||
])
|
||||
responses = sandbox.upload_files(
|
||||
[
|
||||
("/app/config.json", b"{...}"),
|
||||
("/app/data.txt", b"content"),
|
||||
]
|
||||
)
|
||||
```
|
||||
"""
|
||||
|
||||
@@ -379,18 +384,18 @@ class BackendProtocol(abc.ABC):
|
||||
return await asyncio.to_thread(self.upload_files, files)
|
||||
|
||||
def download_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
"""샌드박스에서 여러 파일을 다운로드합니다.
|
||||
"""Download multiple files from the sandbox.
|
||||
|
||||
이 API는 개발자가 직접 사용하거나 커스텀 도구를 통해
|
||||
LLM에게 노출할 수 있도록 설계되었습니다.
|
||||
This API is designed to allow developers to use it either directly or
|
||||
by exposing it to LLMs via custom tools.
|
||||
|
||||
Args:
|
||||
paths: 다운로드할 파일 경로의 리스트.
|
||||
paths: List of file paths to download.
|
||||
|
||||
Returns:
|
||||
FileDownloadResponse 객체들의 리스트. 입력 경로마다 하나씩 반환됩니다.
|
||||
응답 순서는 입력 순서와 일치합니다 (paths[i]에 대해 response[i]).
|
||||
파일별 성공/실패 여부는 error 필드를 확인하십시오.
|
||||
List of FileDownloadResponse objects, one per input path.
|
||||
Response order matches input order (response[i] for paths[i]).
|
||||
Check the error field to determine success/failure per file.
|
||||
"""
|
||||
|
||||
async def adownload_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
@@ -400,41 +405,41 @@ class BackendProtocol(abc.ABC):
|
||||
|
||||
@dataclass
|
||||
class ExecuteResponse:
|
||||
"""코드 실행 결과.
|
||||
"""Result of code execution.
|
||||
|
||||
LLM 소비에 최적화된 단순화된 스키마입니다.
|
||||
Simplified schema optimized for LLM consumption.
|
||||
"""
|
||||
|
||||
output: str
|
||||
"""실행된 명령의 결합된 표준 출력(stdout) 및 표준 에러(stderr)."""
|
||||
"""Combined stdout and stderr output of the executed command."""
|
||||
|
||||
exit_code: int | None = None
|
||||
"""프로세스 종료 코드. 0은 성공, 0이 아닌 값은 실패를 나타냅니다."""
|
||||
"""The process exit code. 0 indicates success, non-zero indicates failure."""
|
||||
|
||||
truncated: bool = False
|
||||
"""백엔드 제한으로 인해 출력이 잘렸는지 여부."""
|
||||
"""Whether the output was truncated due to backend limitations."""
|
||||
|
||||
|
||||
class SandboxBackendProtocol(BackendProtocol):
|
||||
"""격리된 런타임을 가진 샌드박스 백엔드를 위한 프로토콜.
|
||||
"""Protocol for sandboxed backends with isolated runtime.
|
||||
|
||||
샌드박스 백엔드는 격리된 환경(예: 별도 프로세스, 컨테이너)에서 실행되며
|
||||
정의된 인터페이스를 통해 통신합니다.
|
||||
Sandboxed backends run in isolated environments (e.g., separate processes,
|
||||
containers) and communicate via defined interfaces.
|
||||
"""
|
||||
|
||||
def execute(
|
||||
self,
|
||||
command: str,
|
||||
) -> ExecuteResponse:
|
||||
"""프로세스에서 명령을 실행합니다.
|
||||
"""Execute a command in the process.
|
||||
|
||||
LLM 소비에 최적화된 단순화된 인터페이스.
|
||||
Simplified interface optimized for LLM consumption.
|
||||
|
||||
Args:
|
||||
command: 실행할 전체 쉘 명령 문자열.
|
||||
command: Full shell command string to execute.
|
||||
|
||||
Returns:
|
||||
결합된 출력, 종료 코드, 선택적 시그널, 잘림(truncation) 플래그를 포함하는 ExecuteResponse.
|
||||
ExecuteResponse with combined output, exit code, optional signal, and truncation flag.
|
||||
"""
|
||||
|
||||
async def aexecute(
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
"""execute()만을 추상 메서드로 가지는 기본 샌드박스 구현.
|
||||
"""Base sandbox implementation with execute() as the only abstract method.
|
||||
|
||||
이 모듈은 execute()를 통해 쉘 명령을 실행하여 모든 SandboxBackendProtocol
|
||||
메서드를 구현하는 기본 클래스를 제공합니다. 구체적인 구현체는
|
||||
오직 execute() 메서드만 구현하면 됩니다.
|
||||
This module provides a base class that implements all SandboxBackendProtocol
|
||||
methods using shell commands executed via execute(). Concrete implementations
|
||||
only need to implement the execute() method.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@@ -29,7 +29,7 @@ import os
|
||||
import json
|
||||
import base64
|
||||
|
||||
# base64 인코딩된 파라미터 디코딩
|
||||
# Decode base64-encoded parameters
|
||||
path = base64.b64decode('{path_b64}').decode('utf-8')
|
||||
pattern = base64.b64decode('{pattern_b64}').decode('utf-8')
|
||||
|
||||
@@ -53,16 +53,16 @@ import base64
|
||||
|
||||
file_path = '{file_path}'
|
||||
|
||||
# 파일이 이미 존재하는지 확인 (쓰기와 원자적)
|
||||
# Check if file already exists (atomic with write)
|
||||
if os.path.exists(file_path):
|
||||
print(f'Error: File \\'{file_path}\\' already exists', file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# 필요시 부모 디렉토리 생성
|
||||
# Create parent directory if needed
|
||||
parent_dir = os.path.dirname(file_path) or '.'
|
||||
os.makedirs(parent_dir, exist_ok=True)
|
||||
|
||||
# 내용 디코딩 및 쓰기
|
||||
# Decode and write content
|
||||
content = base64.b64decode('{content_b64}').decode('utf-8')
|
||||
with open(file_path, 'w') as f:
|
||||
f.write(content)
|
||||
@@ -72,30 +72,30 @@ _EDIT_COMMAND_TEMPLATE = """python3 -c "
|
||||
import sys
|
||||
import base64
|
||||
|
||||
# 파일 내용 읽기
|
||||
# Read file content
|
||||
with open('{file_path}', 'r') as f:
|
||||
text = f.read()
|
||||
|
||||
# base64 인코딩된 문자열 디코딩
|
||||
# Decode base64-encoded strings
|
||||
old = base64.b64decode('{old_b64}').decode('utf-8')
|
||||
new = base64.b64decode('{new_b64}').decode('utf-8')
|
||||
|
||||
# 발생 횟수 계산
|
||||
# Count occurrences
|
||||
count = text.count(old)
|
||||
|
||||
# 문제가 발견되면 에러 코드와 함께 종료
|
||||
# Exit with error codes if issues found
|
||||
if count == 0:
|
||||
sys.exit(1) # 문자열을 찾을 수 없음
|
||||
sys.exit(1) # String not found
|
||||
elif count > 1 and not {replace_all}:
|
||||
sys.exit(2) # replace_all 없이 여러 번 발생
|
||||
sys.exit(2) # Multiple occurrences without replace_all
|
||||
|
||||
# 교체 수행
|
||||
# Perform replacement
|
||||
if {replace_all}:
|
||||
result = text.replace(old, new)
|
||||
else:
|
||||
result = text.replace(old, new, 1)
|
||||
|
||||
# 파일에 다시 쓰기
|
||||
# Write back to file
|
||||
with open('{file_path}', 'w') as f:
|
||||
f.write(result)
|
||||
|
||||
@@ -110,39 +110,39 @@ file_path = '{file_path}'
|
||||
offset = {offset}
|
||||
limit = {limit}
|
||||
|
||||
# 파일이 존재하는지 확인
|
||||
# Check if file exists
|
||||
if not os.path.isfile(file_path):
|
||||
print('Error: File not found')
|
||||
sys.exit(1)
|
||||
|
||||
# 파일이 비어있는지 확인
|
||||
# Check if file is empty
|
||||
if os.path.getsize(file_path) == 0:
|
||||
print('System reminder: File exists but has empty contents')
|
||||
sys.exit(0)
|
||||
|
||||
# offset과 limit으로 파일 읽기
|
||||
# Read file with offset and limit
|
||||
with open(file_path, 'r') as f:
|
||||
lines = f.readlines()
|
||||
|
||||
# offset과 limit 적용
|
||||
# Apply offset and limit
|
||||
start_idx = offset
|
||||
end_idx = offset + limit
|
||||
selected_lines = lines[start_idx:end_idx]
|
||||
|
||||
# 라인 번호로 포맷팅 (1부터 시작, offset + 1부터 시작)
|
||||
# Format with line numbers (1-indexed, starting from offset + 1)
|
||||
for i, line in enumerate(selected_lines):
|
||||
line_num = offset + i + 1
|
||||
# 포맷팅을 위해 끝의 개행 문자 제거 후 다시 추가
|
||||
# Remove trailing newline for formatting, then add it back
|
||||
line_content = line.rstrip('\\n')
|
||||
print(f'{{line_num:6d}}\\t{{line_content}}')
|
||||
" 2>&1"""
|
||||
|
||||
|
||||
class BaseSandbox(SandboxBackendProtocol, ABC):
|
||||
"""execute()를 추상 메서드로 가지는 기본 샌드박스 구현.
|
||||
"""Base sandbox implementation with execute() as abstract method.
|
||||
|
||||
이 클래스는 쉘 명령을 사용하여 모든 프로토콜 메서드에 대한 기본 구현을
|
||||
제공합니다. 하위 클래스는 오직 execute()만 구현하면 됩니다.
|
||||
This class provides default implementations for all protocol methods
|
||||
using shell commands. Subclasses only need to implement execute().
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
@@ -150,18 +150,18 @@ class BaseSandbox(SandboxBackendProtocol, ABC):
|
||||
self,
|
||||
command: str,
|
||||
) -> ExecuteResponse:
|
||||
"""샌드박스에서 명령을 실행하고 ExecuteResponse를 반환합니다.
|
||||
"""Execute a command in the sandbox and return ExecuteResponse.
|
||||
|
||||
Args:
|
||||
command: 실행할 전체 쉘 명령 문자열.
|
||||
command: Full shell command string to execute.
|
||||
|
||||
Returns:
|
||||
결합된 출력, 종료 코드, 선택적 시그널, 잘림(truncation) 플래그를 포함하는 ExecuteResponse.
|
||||
ExecuteResponse with combined output, exit code, optional signal, and truncation flag.
|
||||
"""
|
||||
...
|
||||
|
||||
def ls_info(self, path: str) -> list[FileInfo]:
|
||||
"""os.scandir을 사용하여 파일 메타데이터가 포함된 구조화된 목록을 반환합니다."""
|
||||
"""Structured listing with file metadata using os.scandir."""
|
||||
cmd = f"""python3 -c "
|
||||
import os
|
||||
import json
|
||||
@@ -202,8 +202,8 @@ except PermissionError:
|
||||
offset: int = 0,
|
||||
limit: int = 2000,
|
||||
) -> str:
|
||||
"""단일 쉘 명령을 사용하여 라인 번호와 함께 파일 내용을 읽습니다."""
|
||||
# offset과 limit으로 파일을 읽기 위해 템플릿 사용
|
||||
"""Read file content with line numbers using a single shell command."""
|
||||
# Use template for reading file with offset and limit
|
||||
cmd = _READ_COMMAND_TEMPLATE.format(file_path=file_path, offset=offset, limit=limit)
|
||||
result = self.execute(cmd)
|
||||
|
||||
@@ -220,20 +220,20 @@ except PermissionError:
|
||||
file_path: str,
|
||||
content: str,
|
||||
) -> WriteResult:
|
||||
"""새 파일을 생성합니다. WriteResult를 반환하며, 실패 시 에러가 채워집니다."""
|
||||
# 이스케이프 문제를 피하기 위해 내용을 base64로 인코딩
|
||||
"""Create a new file. Returns WriteResult; error populated on failure."""
|
||||
# Encode content as base64 to avoid any escaping issues
|
||||
content_b64 = base64.b64encode(content.encode("utf-8")).decode("ascii")
|
||||
|
||||
# 단일 원자적 확인 + 쓰기 명령
|
||||
# Single atomic check + write command
|
||||
cmd = _WRITE_COMMAND_TEMPLATE.format(file_path=file_path, content_b64=content_b64)
|
||||
result = self.execute(cmd)
|
||||
|
||||
# 에러 확인 (종료 코드 또는 출력 내 에러 메시지)
|
||||
# Check for errors (exit code or error message in output)
|
||||
if result.exit_code != 0 or "Error:" in result.output:
|
||||
error_msg = result.output.strip() or f"Failed to write file '{file_path}'"
|
||||
return WriteResult(error=error_msg)
|
||||
|
||||
# 외부 저장소 - files_update 필요 없음
|
||||
# External storage - no files_update needed
|
||||
return WriteResult(path=file_path, files_update=None)
|
||||
|
||||
def edit(
|
||||
@@ -243,15 +243,13 @@ except PermissionError:
|
||||
new_string: str,
|
||||
replace_all: bool = False,
|
||||
) -> EditResult:
|
||||
"""문자열 발생(occurrences)을 교체하여 파일을 편집합니다. EditResult를 반환합니다."""
|
||||
# 이스케이프 문제를 피하기 위해 문자열을 base64로 인코딩
|
||||
"""Edit a file by replacing string occurrences. Returns EditResult."""
|
||||
# Encode strings as base64 to avoid any escaping issues
|
||||
old_b64 = base64.b64encode(old_string.encode("utf-8")).decode("ascii")
|
||||
new_b64 = base64.b64encode(new_string.encode("utf-8")).decode("ascii")
|
||||
|
||||
# 문자열 교체를 위해 템플릿 사용
|
||||
cmd = _EDIT_COMMAND_TEMPLATE.format(
|
||||
file_path=file_path, old_b64=old_b64, new_b64=new_b64, replace_all=replace_all
|
||||
)
|
||||
# Use template for string replacement
|
||||
cmd = _EDIT_COMMAND_TEMPLATE.format(file_path=file_path, old_b64=old_b64, new_b64=new_b64, replace_all=replace_all)
|
||||
result = self.execute(cmd)
|
||||
|
||||
exit_code = result.exit_code
|
||||
@@ -260,14 +258,12 @@ except PermissionError:
|
||||
if exit_code == 1:
|
||||
return EditResult(error=f"Error: String not found in file: '{old_string}'")
|
||||
if exit_code == 2:
|
||||
return EditResult(
|
||||
error=f"Error: String '{old_string}' appears multiple times. Use replace_all=True to replace all occurrences."
|
||||
)
|
||||
return EditResult(error=f"Error: String '{old_string}' appears multiple times. Use replace_all=True to replace all occurrences.")
|
||||
if exit_code != 0:
|
||||
return EditResult(error=f"Error: File '{file_path}' not found")
|
||||
|
||||
count = int(output)
|
||||
# 외부 저장소 - files_update 필요 없음
|
||||
# External storage - no files_update needed
|
||||
return EditResult(path=file_path, files_update=None, occurrences=count)
|
||||
|
||||
def grep_raw(
|
||||
@@ -276,18 +272,18 @@ except PermissionError:
|
||||
path: str | None = None,
|
||||
glob: str | None = None,
|
||||
) -> list[GrepMatch] | str:
|
||||
"""구조화된 검색 결과 또는 잘못된 입력에 대한 에러 문자열을 반환합니다."""
|
||||
"""Structured search results or error string for invalid input."""
|
||||
search_path = shlex.quote(path or ".")
|
||||
|
||||
# 구조화된 출력을 얻기 위해 grep 명령 생성
|
||||
grep_opts = "-rHnF" # 재귀적, 파일명 포함, 라인 번호 포함, 고정 문자열 (리터럴)
|
||||
# Build grep command to get structured output
|
||||
grep_opts = "-rHnF" # recursive, with filename, with line number, fixed-strings (literal)
|
||||
|
||||
# 지정된 경우 glob 패턴 추가
|
||||
# Add glob pattern if specified
|
||||
glob_pattern = ""
|
||||
if glob:
|
||||
glob_pattern = f"--include='{glob}'"
|
||||
|
||||
# 쉘을 위해 패턴 이스케이프
|
||||
# Escape pattern for shell
|
||||
pattern_escaped = shlex.quote(pattern)
|
||||
|
||||
cmd = f"grep {grep_opts} {glob_pattern} -e {pattern_escaped} {search_path} 2>/dev/null || true"
|
||||
@@ -297,23 +293,25 @@ except PermissionError:
|
||||
if not output:
|
||||
return []
|
||||
|
||||
# grep 출력을 GrepMatch 객체로 파싱
|
||||
# Parse grep output into GrepMatch objects
|
||||
matches: list[GrepMatch] = []
|
||||
for line in output.split("\n"):
|
||||
# 형식: 경로:라인번호:텍스트
|
||||
# Format is: path:line_number:text
|
||||
parts = line.split(":", 2)
|
||||
if len(parts) >= 3:
|
||||
matches.append({
|
||||
"path": parts[0],
|
||||
"line": int(parts[1]),
|
||||
"text": parts[2],
|
||||
})
|
||||
matches.append(
|
||||
{
|
||||
"path": parts[0],
|
||||
"line": int(parts[1]),
|
||||
"text": parts[2],
|
||||
}
|
||||
)
|
||||
|
||||
return matches
|
||||
|
||||
def glob_info(self, pattern: str, path: str = "/") -> list[FileInfo]:
|
||||
"""FileInfo dict를 반환하는 구조화된 glob 매칭입니다."""
|
||||
# 이스케이프 문제를 피하기 위해 패턴과 경로를 base64로 인코딩
|
||||
"""Structured glob matching returning FileInfo dicts."""
|
||||
# Encode pattern and path as base64 to avoid escaping issues
|
||||
pattern_b64 = base64.b64encode(pattern.encode("utf-8")).decode("ascii")
|
||||
path_b64 = base64.b64encode(path.encode("utf-8")).decode("ascii")
|
||||
|
||||
@@ -324,15 +322,17 @@ except PermissionError:
|
||||
if not output:
|
||||
return []
|
||||
|
||||
# JSON 출력을 FileInfo dict로 파싱
|
||||
# Parse JSON output into FileInfo dicts
|
||||
file_infos: list[FileInfo] = []
|
||||
for line in output.split("\n"):
|
||||
try:
|
||||
data = json.loads(line)
|
||||
file_infos.append({
|
||||
"path": data["path"],
|
||||
"is_dir": data["is_dir"],
|
||||
})
|
||||
file_infos.append(
|
||||
{
|
||||
"path": data["path"],
|
||||
"is_dir": data["is_dir"],
|
||||
}
|
||||
)
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
|
||||
@@ -341,20 +341,20 @@ except PermissionError:
|
||||
@property
|
||||
@abstractmethod
|
||||
def id(self) -> str:
|
||||
"""샌드박스 백엔드의 고유 식별자입니다."""
|
||||
"""Unique identifier for the sandbox backend."""
|
||||
|
||||
@abstractmethod
|
||||
def upload_files(self, files: list[tuple[str, bytes]]) -> list[FileUploadResponse]:
|
||||
"""샌드박스에 여러 파일을 업로드합니다.
|
||||
"""Upload multiple files to the sandbox.
|
||||
|
||||
구현체는 부분적 성공을 지원해야 합니다 - 파일별로 예외를 catch하고
|
||||
예외를 발생시키는 대신 FileUploadResponse 객체에 에러를 반환해야 합니다.
|
||||
Implementations must support partial success - catch exceptions per-file
|
||||
and return errors in FileUploadResponse objects rather than raising.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def download_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
"""샌드박스에서 여러 파일을 다운로드합니다.
|
||||
"""Download multiple files from the sandbox.
|
||||
|
||||
구현체는 부분적 성공을 지원해야 합니다 - 파일별로 예외를 catch하고
|
||||
예외를 발생시키는 대신 FileDownloadResponse 객체에 에러를 반환해야 합니다.
|
||||
Implementations must support partial success - catch exceptions per-file
|
||||
and return errors in FileDownloadResponse objects rather than raising.
|
||||
"""
|
||||
|
||||
@@ -1,8 +1,16 @@
|
||||
"""StateBackend: 파일을 LangGraph 에이전트 상태(임시)에 저장되도록 합니다."""
|
||||
"""StateBackend: Store files in LangGraph agent state (ephemeral)."""
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from deepagents.backends.protocol import BackendProtocol, EditResult, FileInfo, GrepMatch, WriteResult
|
||||
from deepagents.backends.protocol import (
|
||||
BackendProtocol,
|
||||
EditResult,
|
||||
FileDownloadResponse,
|
||||
FileInfo,
|
||||
FileUploadResponse,
|
||||
GrepMatch,
|
||||
WriteResult,
|
||||
)
|
||||
from deepagents.backends.utils import (
|
||||
_glob_search_files,
|
||||
create_file_data,
|
||||
@@ -18,28 +26,30 @@ if TYPE_CHECKING:
|
||||
|
||||
|
||||
class StateBackend(BackendProtocol):
|
||||
"""에이전트 상태(임시)에 파일을 저장하는 백엔드.
|
||||
"""Backend that stores files in agent state (ephemeral).
|
||||
|
||||
LangGraph의 상태 관리 및 체크포인팅을 사용합니다. 파일은 하나의 대화 스레드 내에서만 지속되며
|
||||
스레드 간에는 공유되지 않습니다. 상태는 각 에이전트 단계 후에 자동으로 체크포인트됩니다.
|
||||
Uses LangGraph's state management and checkpointing. Files persist within
|
||||
a conversation thread but not across threads. State is automatically
|
||||
checkpointed after each agent step.
|
||||
|
||||
특수 처리: LangGraph 상태는 (직접 변경이 아닌) Command 객체를 통해 업데이트되어야 하므로,
|
||||
작업은 None 대신 Command 객체를 반환할 수 있습니다. 이는 uses_state=True 플래그로 표시됩니다.
|
||||
Special handling: Since LangGraph state must be updated via Command objects
|
||||
(not direct mutation), operations return Command objects instead of None.
|
||||
This is indicated by the uses_state=True flag.
|
||||
"""
|
||||
|
||||
def __init__(self, runtime: "ToolRuntime"):
|
||||
"""런타임으로 StateBackend를 초기화합니다."""
|
||||
"""Initialize StateBackend with runtime."""
|
||||
self.runtime = runtime
|
||||
|
||||
def ls_info(self, path: str) -> list[FileInfo]:
|
||||
"""지정된 디렉토리의 파일과 디렉토리를 나열합니다 (비재귀적).
|
||||
"""List files and directories in the specified directory (non-recursive).
|
||||
|
||||
Args:
|
||||
path: 디렉토리의 절대 경로.
|
||||
path: Absolute path to directory.
|
||||
|
||||
Returns:
|
||||
디렉토리 바로 아래에 있는 파일 및 디렉토리에 대한 FileInfo 유사 dict 목록.
|
||||
디렉토리는 경로 끝에 /가 붙으며 is_dir=True입니다.
|
||||
List of FileInfo-like dicts for files and directories directly in the directory.
|
||||
Directories have a trailing / in their path and is_dir=True.
|
||||
"""
|
||||
files = self.runtime.state.get("files", {})
|
||||
infos: list[FileInfo] = []
|
||||
@@ -65,21 +75,25 @@ class StateBackend(BackendProtocol):
|
||||
|
||||
# This is a file directly in the current directory
|
||||
size = len("\n".join(fd.get("content", [])))
|
||||
infos.append({
|
||||
"path": k,
|
||||
"is_dir": False,
|
||||
"size": int(size),
|
||||
"modified_at": fd.get("modified_at", ""),
|
||||
})
|
||||
infos.append(
|
||||
{
|
||||
"path": k,
|
||||
"is_dir": False,
|
||||
"size": int(size),
|
||||
"modified_at": fd.get("modified_at", ""),
|
||||
}
|
||||
)
|
||||
|
||||
# Add directories to the results
|
||||
for subdir in sorted(subdirs):
|
||||
infos.append({
|
||||
"path": subdir,
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": "",
|
||||
})
|
||||
infos.append(
|
||||
{
|
||||
"path": subdir,
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": "",
|
||||
}
|
||||
)
|
||||
|
||||
infos.sort(key=lambda x: x.get("path", ""))
|
||||
return infos
|
||||
@@ -90,15 +104,15 @@ class StateBackend(BackendProtocol):
|
||||
offset: int = 0,
|
||||
limit: int = 2000,
|
||||
) -> str:
|
||||
"""파일 내용을 라인 번호와 함께 읽습니다.
|
||||
"""Read file content with line numbers.
|
||||
|
||||
Args:
|
||||
file_path: 파일 절대 경로.
|
||||
offset: 읽기 시작할 라인 오프셋 (0부터 시작).
|
||||
limit: 읽을 최대 라인 수.
|
||||
file_path: Absolute file path.
|
||||
offset: Line offset to start reading from (0-indexed).
|
||||
limit: Maximum number of lines to read.
|
||||
|
||||
Returns:
|
||||
라인 번호가 포함된 형식화된 파일 내용, 또는 에러 메시지.
|
||||
Formatted file content with line numbers, or error message.
|
||||
"""
|
||||
files = self.runtime.state.get("files", {})
|
||||
file_data = files.get(file_path)
|
||||
@@ -113,15 +127,13 @@ class StateBackend(BackendProtocol):
|
||||
file_path: str,
|
||||
content: str,
|
||||
) -> WriteResult:
|
||||
"""내용을 포함하는 새 파일을 생성합니다.
|
||||
LangGraph 상태 업데이트를 위한 files_update가 포함된 WriteResult를 반환합니다.
|
||||
"""Create a new file with content.
|
||||
Returns WriteResult with files_update to update LangGraph state.
|
||||
"""
|
||||
files = self.runtime.state.get("files", {})
|
||||
|
||||
if file_path in files:
|
||||
return WriteResult(
|
||||
error=f"Cannot write to {file_path} because it already exists. Read and then make an edit, or write to a new path."
|
||||
)
|
||||
return WriteResult(error=f"Cannot write to {file_path} because it already exists. Read and then make an edit, or write to a new path.")
|
||||
|
||||
new_file_data = create_file_data(content)
|
||||
return WriteResult(path=file_path, files_update={file_path: new_file_data})
|
||||
@@ -133,8 +145,8 @@ class StateBackend(BackendProtocol):
|
||||
new_string: str,
|
||||
replace_all: bool = False,
|
||||
) -> EditResult:
|
||||
"""문자열 발생(occurrences)을 교체하여 파일을 편집합니다.
|
||||
files_update와 occurrences가 포함된 EditResult를 반환합니다.
|
||||
"""Edit a file by replacing string occurrences.
|
||||
Returns EditResult with files_update and occurrences.
|
||||
"""
|
||||
files = self.runtime.state.get("files", {})
|
||||
file_data = files.get(file_path)
|
||||
@@ -162,7 +174,7 @@ class StateBackend(BackendProtocol):
|
||||
return grep_matches_from_files(files, pattern, path, glob)
|
||||
|
||||
def glob_info(self, pattern: str, path: str = "/") -> list[FileInfo]:
|
||||
"""glob 패턴과 일치하는 파일에 대한 FileInfo를 가져옵니다."""
|
||||
"""Get FileInfo for files matching glob pattern."""
|
||||
files = self.runtime.state.get("files", {})
|
||||
result = _glob_search_files(files, pattern, path)
|
||||
if result == "No files found":
|
||||
@@ -172,10 +184,53 @@ class StateBackend(BackendProtocol):
|
||||
for p in paths:
|
||||
fd = files.get(p)
|
||||
size = len("\n".join(fd.get("content", []))) if fd else 0
|
||||
infos.append({
|
||||
"path": p,
|
||||
"is_dir": False,
|
||||
"size": int(size),
|
||||
"modified_at": fd.get("modified_at", "") if fd else "",
|
||||
})
|
||||
infos.append(
|
||||
{
|
||||
"path": p,
|
||||
"is_dir": False,
|
||||
"size": int(size),
|
||||
"modified_at": fd.get("modified_at", "") if fd else "",
|
||||
}
|
||||
)
|
||||
return infos
|
||||
|
||||
def upload_files(self, files: list[tuple[str, bytes]]) -> list[FileUploadResponse]:
|
||||
"""Upload multiple files to state.
|
||||
|
||||
Args:
|
||||
files: List of (path, content) tuples to upload
|
||||
|
||||
Returns:
|
||||
List of FileUploadResponse objects, one per input file
|
||||
"""
|
||||
raise NotImplementedError(
|
||||
"StateBackend does not support upload_files yet. You can upload files "
|
||||
"directly by passing them in invoke if you're storing files in the memory."
|
||||
)
|
||||
|
||||
def download_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
"""Download multiple files from state.
|
||||
|
||||
Args:
|
||||
paths: List of file paths to download
|
||||
|
||||
Returns:
|
||||
List of FileDownloadResponse objects, one per input path
|
||||
"""
|
||||
state_files = self.runtime.state.get("files", {})
|
||||
responses: list[FileDownloadResponse] = []
|
||||
|
||||
for path in paths:
|
||||
file_data = state_files.get(path)
|
||||
|
||||
if file_data is None:
|
||||
responses.append(FileDownloadResponse(path=path, content=None, error="file_not_found"))
|
||||
continue
|
||||
|
||||
# Convert file data to bytes
|
||||
content_str = file_data_to_string(file_data)
|
||||
content_bytes = content_str.encode("utf-8")
|
||||
|
||||
responses.append(FileDownloadResponse(path=path, content=content_bytes, error=None))
|
||||
|
||||
return responses
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""StoreBackend: LangGraph의 BaseStore(영구적, 스레드 간 공유)를 위한 어댑터."""
|
||||
"""StoreBackend: Adapter for LangGraph's BaseStore (persistent, cross-thread)."""
|
||||
|
||||
from typing import Any
|
||||
|
||||
@@ -26,30 +26,30 @@ from deepagents.backends.utils import (
|
||||
|
||||
|
||||
class StoreBackend(BackendProtocol):
|
||||
"""파일을 LangGraph의 BaseStore(영구적)에 저장하는 백엔드.
|
||||
"""Backend that stores files in LangGraph's BaseStore (persistent).
|
||||
|
||||
LangGraph의 Store를 사용하여 영구적이고 대화 간 공유되는 저장소를 사용합니다.
|
||||
파일은 네임스페이스를 통해 조직화되며 모든 스레드에서 지속됩니다.
|
||||
Uses LangGraph's Store for persistent, cross-conversation storage.
|
||||
Files are organized via namespaces and persist across all threads.
|
||||
|
||||
네임스페이스는 다중 에이전트 격리를 위해 선택적 assistant_id를 포함할 수 있습니다.
|
||||
The namespace can include an optional assistant_id for multi-agent isolation.
|
||||
"""
|
||||
|
||||
def __init__(self, runtime: "ToolRuntime"):
|
||||
"""런타임으로 StoreBackend를 초기화합니다.
|
||||
"""Initialize StoreBackend with runtime.
|
||||
|
||||
Args:
|
||||
runtime: 저장소 접근 및 구성을 제공하는 ToolRuntime 인스턴스.
|
||||
runtime: The ToolRuntime instance providing store access and configuration.
|
||||
"""
|
||||
self.runtime = runtime
|
||||
|
||||
def _get_store(self) -> BaseStore:
|
||||
"""저장소(store) 인스턴스를 가져옵니다.
|
||||
"""Get the store instance.
|
||||
|
||||
Returns:
|
||||
런타임의 BaseStore 인스턴스.
|
||||
BaseStore instance from the runtime.
|
||||
|
||||
Raises:
|
||||
ValueError: 런타임에서 저장소를 사용할 수 없는 경우.
|
||||
ValueError: If no store is available in the runtime.
|
||||
"""
|
||||
store = self.runtime.store
|
||||
if store is None:
|
||||
@@ -58,15 +58,15 @@ class StoreBackend(BackendProtocol):
|
||||
return store
|
||||
|
||||
def _get_namespace(self) -> tuple[str, ...]:
|
||||
"""저장소 작업을 위한 네임스페이스를 가져옵니다.
|
||||
"""Get the namespace for store operations.
|
||||
|
||||
우선순위:
|
||||
1) 존재하는 경우 `self.runtime.config` 사용 (테스트에서 명시적으로 전달).
|
||||
2) 가능한 경우 `langgraph.config.get_config()`로 폴백(fallback).
|
||||
3) ("filesystem",)으로 기본 설정.
|
||||
Preference order:
|
||||
1) Use `self.runtime.config` if present (tests pass this explicitly).
|
||||
2) Fallback to `langgraph.config.get_config()` if available.
|
||||
3) Default to ("filesystem",).
|
||||
|
||||
config 메타데이터에 assistant_id가 있는 경우,
|
||||
에이전트별 격리를 제공하기 위해 (assistant_id, "filesystem")을 반환합니다.
|
||||
If an assistant_id is available in the config metadata, return
|
||||
(assistant_id, "filesystem") to provide per-assistant isolation.
|
||||
"""
|
||||
namespace = "filesystem"
|
||||
|
||||
@@ -95,16 +95,16 @@ class StoreBackend(BackendProtocol):
|
||||
return (namespace,)
|
||||
|
||||
def _convert_store_item_to_file_data(self, store_item: Item) -> dict[str, Any]:
|
||||
"""저장소 Item을 FileData 형식으로 변환합니다.
|
||||
"""Convert a store Item to FileData format.
|
||||
|
||||
Args:
|
||||
store_item: 파일 데이터를 포함하는 저장소 Item.
|
||||
store_item: The store Item containing file data.
|
||||
|
||||
Returns:
|
||||
content, created_at, modified_at 필드를 포함하는 FileData dict.
|
||||
FileData dict with content, created_at, and modified_at fields.
|
||||
|
||||
Raises:
|
||||
ValueError: 필수 필드가 누락되었거나 올바르지 않은 타입인 경우.
|
||||
ValueError: If required fields are missing or have incorrect types.
|
||||
"""
|
||||
if "content" not in store_item.value or not isinstance(store_item.value["content"], list):
|
||||
msg = f"Store item does not contain valid content field. Got: {store_item.value.keys()}"
|
||||
@@ -122,13 +122,13 @@ class StoreBackend(BackendProtocol):
|
||||
}
|
||||
|
||||
def _convert_file_data_to_store_value(self, file_data: dict[str, Any]) -> dict[str, Any]:
|
||||
"""FileData를 store.put()에 적합한 dict로 변환합니다.
|
||||
"""Convert FileData to a dict suitable for store.put().
|
||||
|
||||
Args:
|
||||
file_data: 변환할 FileData.
|
||||
file_data: The FileData to convert.
|
||||
|
||||
Returns:
|
||||
content, created_at, modified_at 필드를 포함하는 딕셔너리.
|
||||
Dictionary with content, created_at, and modified_at fields.
|
||||
"""
|
||||
return {
|
||||
"content": file_data["content"],
|
||||
@@ -145,17 +145,17 @@ class StoreBackend(BackendProtocol):
|
||||
filter: dict[str, Any] | None = None,
|
||||
page_size: int = 100,
|
||||
) -> list[Item]:
|
||||
"""자동 페이지네이션으로 저장소를 검색하여 모든 결과를 가져옵니다.
|
||||
"""Search store with automatic pagination to retrieve all results.
|
||||
|
||||
Args:
|
||||
store: 검색할 저장소.
|
||||
namespace: 검색할 계층적 경로 접두사(prefix).
|
||||
query: 자연어 검색을 위한 선택적 쿼리.
|
||||
filter: 결과 필터링을 위한 키-값 쌍.
|
||||
page_size: 페이지당 가져올 아이템 수 (기본값: 100).
|
||||
store: The store to search.
|
||||
namespace: Hierarchical path prefix to search within.
|
||||
query: Optional query for natural language search.
|
||||
filter: Key-value pairs to filter results.
|
||||
page_size: Number of items to fetch per page (default: 100).
|
||||
|
||||
Returns:
|
||||
검색 조건과 일치하는 모든 아이템 목록.
|
||||
List of all items matching the search criteria.
|
||||
|
||||
Example:
|
||||
```python
|
||||
@@ -184,14 +184,14 @@ class StoreBackend(BackendProtocol):
|
||||
return all_items
|
||||
|
||||
def ls_info(self, path: str) -> list[FileInfo]:
|
||||
"""지정된 디렉토리의 파일과 디렉토리를 나열합니다 (비재귀적).
|
||||
"""List files and directories in the specified directory (non-recursive).
|
||||
|
||||
Args:
|
||||
path: 디렉토리의 절대 경로.
|
||||
path: Absolute path to directory.
|
||||
|
||||
Returns:
|
||||
디렉토리 바로 아래에 있는 파일 및 디렉토리에 대한 FileInfo 유사 dict 목록.
|
||||
디렉토리는 경로 끝에 /가 붙으며 is_dir=True입니다.
|
||||
List of FileInfo-like dicts for files and directories directly in the directory.
|
||||
Directories have a trailing / in their path and is_dir=True.
|
||||
"""
|
||||
store = self._get_store()
|
||||
namespace = self._get_namespace()
|
||||
@@ -226,21 +226,25 @@ class StoreBackend(BackendProtocol):
|
||||
except ValueError:
|
||||
continue
|
||||
size = len("\n".join(fd.get("content", [])))
|
||||
infos.append({
|
||||
"path": item.key,
|
||||
"is_dir": False,
|
||||
"size": int(size),
|
||||
"modified_at": fd.get("modified_at", ""),
|
||||
})
|
||||
infos.append(
|
||||
{
|
||||
"path": item.key,
|
||||
"is_dir": False,
|
||||
"size": int(size),
|
||||
"modified_at": fd.get("modified_at", ""),
|
||||
}
|
||||
)
|
||||
|
||||
# Add directories to the results
|
||||
for subdir in sorted(subdirs):
|
||||
infos.append({
|
||||
"path": subdir,
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": "",
|
||||
})
|
||||
infos.append(
|
||||
{
|
||||
"path": subdir,
|
||||
"is_dir": True,
|
||||
"size": 0,
|
||||
"modified_at": "",
|
||||
}
|
||||
)
|
||||
|
||||
infos.sort(key=lambda x: x.get("path", ""))
|
||||
return infos
|
||||
@@ -251,15 +255,15 @@ class StoreBackend(BackendProtocol):
|
||||
offset: int = 0,
|
||||
limit: int = 2000,
|
||||
) -> str:
|
||||
"""파일 내용을 라인 번호와 함께 읽습니다.
|
||||
"""Read file content with line numbers.
|
||||
|
||||
Args:
|
||||
file_path: 파일 절대 경로.
|
||||
offset: 읽기 시작할 라인 오프셋 (0부터 시작).
|
||||
limit: 읽을 최대 라인 수.
|
||||
file_path: Absolute file path.
|
||||
offset: Line offset to start reading from (0-indexed).
|
||||
limit: Maximum number of lines to read.
|
||||
|
||||
Returns:
|
||||
라인 번호가 포함된 형식화된 파일 내용, 또는 에러 메시지.
|
||||
Formatted file content with line numbers, or error message.
|
||||
"""
|
||||
store = self._get_store()
|
||||
namespace = self._get_namespace()
|
||||
@@ -280,8 +284,8 @@ class StoreBackend(BackendProtocol):
|
||||
file_path: str,
|
||||
content: str,
|
||||
) -> WriteResult:
|
||||
"""내용을 포함하는 새 파일을 생성합니다.
|
||||
WriteResult를 반환합니다. 외부 저장소는 files_update=None을 설정합니다.
|
||||
"""Create a new file with content.
|
||||
Returns WriteResult. External storage sets files_update=None.
|
||||
"""
|
||||
store = self._get_store()
|
||||
namespace = self._get_namespace()
|
||||
@@ -289,9 +293,7 @@ class StoreBackend(BackendProtocol):
|
||||
# Check if file exists
|
||||
existing = store.get(namespace, file_path)
|
||||
if existing is not None:
|
||||
return WriteResult(
|
||||
error=f"Cannot write to {file_path} because it already exists. Read and then make an edit, or write to a new path."
|
||||
)
|
||||
return WriteResult(error=f"Cannot write to {file_path} because it already exists. Read and then make an edit, or write to a new path.")
|
||||
|
||||
# Create new file
|
||||
file_data = create_file_data(content)
|
||||
@@ -306,8 +308,8 @@ class StoreBackend(BackendProtocol):
|
||||
new_string: str,
|
||||
replace_all: bool = False,
|
||||
) -> EditResult:
|
||||
"""문자열 발생(occurrences)을 교체하여 파일을 편집합니다.
|
||||
EditResult를 반환합니다. 외부 저장소는 files_update=None을 설정합니다.
|
||||
"""Edit a file by replacing string occurrences.
|
||||
Returns EditResult. External storage sets files_update=None.
|
||||
"""
|
||||
store = self._get_store()
|
||||
namespace = self._get_namespace()
|
||||
@@ -373,23 +375,25 @@ class StoreBackend(BackendProtocol):
|
||||
for p in paths:
|
||||
fd = files.get(p)
|
||||
size = len("\n".join(fd.get("content", []))) if fd else 0
|
||||
infos.append({
|
||||
"path": p,
|
||||
"is_dir": False,
|
||||
"size": int(size),
|
||||
"modified_at": fd.get("modified_at", "") if fd else "",
|
||||
})
|
||||
infos.append(
|
||||
{
|
||||
"path": p,
|
||||
"is_dir": False,
|
||||
"size": int(size),
|
||||
"modified_at": fd.get("modified_at", "") if fd else "",
|
||||
}
|
||||
)
|
||||
return infos
|
||||
|
||||
def upload_files(self, files: list[tuple[str, bytes]]) -> list[FileUploadResponse]:
|
||||
"""저장소에 여러 파일을 업로드합니다.
|
||||
"""Upload multiple files to the store.
|
||||
|
||||
Args:
|
||||
files: 내용이 bytes인 (path, content) 튜플의 리스트.
|
||||
files: List of (path, content) tuples where content is bytes.
|
||||
|
||||
Returns:
|
||||
FileUploadResponse 객체들의 리스트. 입력 파일마다 하나씩 반환됩니다.
|
||||
응답 순서는 입력 순서와 일치합니다.
|
||||
List of FileUploadResponse objects, one per input file.
|
||||
Response order matches input order.
|
||||
"""
|
||||
store = self._get_store()
|
||||
namespace = self._get_namespace()
|
||||
@@ -408,14 +412,14 @@ class StoreBackend(BackendProtocol):
|
||||
return responses
|
||||
|
||||
def download_files(self, paths: list[str]) -> list[FileDownloadResponse]:
|
||||
"""저장소에서 여러 파일을 다운로드합니다.
|
||||
"""Download multiple files from the store.
|
||||
|
||||
Args:
|
||||
paths: 다운로드할 파일 경로의 리스트.
|
||||
paths: List of file paths to download.
|
||||
|
||||
Returns:
|
||||
FileDownloadResponse 객체들의 리스트. 입력 경로마다 하나씩 반환됩니다.
|
||||
응답 순서는 입력 순서와 일치합니다.
|
||||
List of FileDownloadResponse objects, one per input path.
|
||||
Response order matches input order.
|
||||
"""
|
||||
store = self._get_store()
|
||||
namespace = self._get_namespace()
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
"""메모리 백엔드 구현을 위한 공유 유틸리티 함수들.
|
||||
"""Shared utility functions for memory backend implementations.
|
||||
|
||||
이 모듈은 백엔드와 복합 라우터(composite router)에서 사용하는
|
||||
사용자 대면 문자열 포맷터와 구조적 헬퍼 함수를 포함합니다.
|
||||
구조적 헬퍼는 깨지기 쉬운 문자열 파싱 없이 구성을 가능하게 합니다.
|
||||
This module contains both user-facing string formatters and structured
|
||||
helpers used by backends and the composite router. Structured helpers
|
||||
enable composition without fragile string parsing.
|
||||
"""
|
||||
|
||||
import re
|
||||
@@ -27,9 +27,9 @@ GrepMatch = _GrepMatch
|
||||
|
||||
|
||||
def sanitize_tool_call_id(tool_call_id: str) -> str:
|
||||
r"""경로 탐색(path traversal) 및 구분자 문제를 방지하기 위해 tool_call_id를 정리(sanitize)합니다.
|
||||
r"""Sanitize tool_call_id to prevent path traversal and separator issues.
|
||||
|
||||
위험한 문자(., /, \)를 밑줄(_)로 교체합니다.
|
||||
Replaces dangerous characters (., /, \) with underscores.
|
||||
"""
|
||||
sanitized = tool_call_id.replace(".", "_").replace("/", "_").replace("\\", "_")
|
||||
return sanitized
|
||||
@@ -39,16 +39,16 @@ def format_content_with_line_numbers(
|
||||
content: str | list[str],
|
||||
start_line: int = 1,
|
||||
) -> str:
|
||||
"""파일 내용을 라인 번호와 함께 포맷팅합니다 (cat -n 스타일).
|
||||
"""Format file content with line numbers (cat -n style).
|
||||
|
||||
MAX_LINE_LENGTH보다 긴 라인은 연속 마커(예: 5.1, 5.2)와 함께 청크로 나눕니다.
|
||||
Chunks lines longer than MAX_LINE_LENGTH with continuation markers (e.g., 5.1, 5.2).
|
||||
|
||||
Args:
|
||||
content: 문자열 또는 라인 리스트 형태의 파일 내용
|
||||
start_line: 시작 라인 번호 (기본값: 1)
|
||||
content: File content as string or list of lines
|
||||
start_line: Starting line number (default: 1)
|
||||
|
||||
Returns:
|
||||
라인 번호와 연속 마커가 포함된 포맷팅된 내용
|
||||
Formatted content with line numbers and continuation markers
|
||||
"""
|
||||
if isinstance(content, str):
|
||||
lines = content.split("\n")
|
||||
@@ -82,13 +82,13 @@ def format_content_with_line_numbers(
|
||||
|
||||
|
||||
def check_empty_content(content: str) -> str | None:
|
||||
"""내용이 비어 있는지 확인하고 경고 메시지를 반환합니다.
|
||||
"""Check if content is empty and return warning message.
|
||||
|
||||
Args:
|
||||
content: 확인할 내용
|
||||
content: Content to check
|
||||
|
||||
Returns:
|
||||
비어 있는 경우 경고 메시지, 그렇지 않으면 None
|
||||
Warning message if empty, None otherwise
|
||||
"""
|
||||
if not content or content.strip() == "":
|
||||
return EMPTY_CONTENT_WARNING
|
||||
@@ -96,26 +96,26 @@ def check_empty_content(content: str) -> str | None:
|
||||
|
||||
|
||||
def file_data_to_string(file_data: dict[str, Any]) -> str:
|
||||
"""FileData를 일반 문자열 내용으로 변환합니다.
|
||||
"""Convert FileData to plain string content.
|
||||
|
||||
Args:
|
||||
file_data: 'content' 키를 가진 FileData dict
|
||||
file_data: FileData dict with 'content' key
|
||||
|
||||
Returns:
|
||||
줄바꿈으로 연결된 문자열 형태의 내용
|
||||
Content as string with lines joined by newlines
|
||||
"""
|
||||
return "\n".join(file_data["content"])
|
||||
|
||||
|
||||
def create_file_data(content: str, created_at: str | None = None) -> dict[str, Any]:
|
||||
"""타임스탬프를 포함하는 FileData 객체를 생성합니다.
|
||||
"""Create a FileData object with timestamps.
|
||||
|
||||
Args:
|
||||
content: 문자열 형태의 파일 내용
|
||||
created_at: 선택적 생성 타임스탬프 (ISO 형식)
|
||||
content: File content as string
|
||||
created_at: Optional creation timestamp (ISO format)
|
||||
|
||||
Returns:
|
||||
내용과 타임스탬프를 포함하는 FileData dict
|
||||
FileData dict with content and timestamps
|
||||
"""
|
||||
lines = content.split("\n") if isinstance(content, str) else content
|
||||
now = datetime.now(UTC).isoformat()
|
||||
@@ -128,14 +128,14 @@ def create_file_data(content: str, created_at: str | None = None) -> dict[str, A
|
||||
|
||||
|
||||
def update_file_data(file_data: dict[str, Any], content: str) -> dict[str, Any]:
|
||||
"""생성 타임스탬프를 유지하면서 새로운 내용으로 FileData를 업데이트합니다.
|
||||
"""Update FileData with new content, preserving creation timestamp.
|
||||
|
||||
Args:
|
||||
file_data: 기존 FileData dict
|
||||
content: 문자열 형태의 새로운 내용
|
||||
file_data: Existing FileData dict
|
||||
content: New content as string
|
||||
|
||||
Returns:
|
||||
업데이트된 FileData dict
|
||||
Updated FileData dict
|
||||
"""
|
||||
lines = content.split("\n") if isinstance(content, str) else content
|
||||
now = datetime.now(UTC).isoformat()
|
||||
@@ -152,15 +152,15 @@ def format_read_response(
|
||||
offset: int,
|
||||
limit: int,
|
||||
) -> str:
|
||||
"""읽기 응답을 위해 파일 데이터를 라인 번호와 함께 포맷팅합니다.
|
||||
"""Format file data for read response with line numbers.
|
||||
|
||||
Args:
|
||||
file_data: FileData dict
|
||||
offset: 라인 오프셋 (0부터 시작)
|
||||
limit: 최대 라인 수
|
||||
offset: Line offset (0-indexed)
|
||||
limit: Maximum number of lines
|
||||
|
||||
Returns:
|
||||
포맷팅된 내용 또는 에러 메시지
|
||||
Formatted content or error message
|
||||
"""
|
||||
content = file_data_to_string(file_data)
|
||||
empty_msg = check_empty_content(content)
|
||||
@@ -184,16 +184,16 @@ def perform_string_replacement(
|
||||
new_string: str,
|
||||
replace_all: bool,
|
||||
) -> tuple[str, int] | str:
|
||||
"""발생(occurrence) 검증과 함께 문자열 교체를 수행합니다.
|
||||
"""Perform string replacement with occurrence validation.
|
||||
|
||||
Args:
|
||||
content: 원본 내용
|
||||
old_string: 교체할 문자열
|
||||
new_string: 새로운 문자열
|
||||
replace_all: 모든 발생을 교체할지 여부
|
||||
content: Original content
|
||||
old_string: String to replace
|
||||
new_string: Replacement string
|
||||
replace_all: Whether to replace all occurrences
|
||||
|
||||
Returns:
|
||||
성공 시 (new_content, occurrences) 튜플, 또는 에러 메시지 문자열
|
||||
Tuple of (new_content, occurrences) on success, or error message string
|
||||
"""
|
||||
occurrences = content.count(old_string)
|
||||
|
||||
@@ -208,7 +208,7 @@ def perform_string_replacement(
|
||||
|
||||
|
||||
def truncate_if_too_long(result: list[str] | str) -> list[str] | str:
|
||||
"""토큰 제한을 초과하는 경우 리스트 또는 문자열 결과를 잘라냅니다 (대략적 추정: 4자/토큰)."""
|
||||
"""Truncate list or string result if it exceeds token limit (rough estimate: 4 chars/token)."""
|
||||
if isinstance(result, list):
|
||||
total_chars = sum(len(item) for item in result)
|
||||
if total_chars > TOOL_RESULT_TOKEN_LIMIT * 4:
|
||||
@@ -221,16 +221,16 @@ def truncate_if_too_long(result: list[str] | str) -> list[str] | str:
|
||||
|
||||
|
||||
def _validate_path(path: str | None) -> str:
|
||||
"""경로를 검증하고 정규화합니다.
|
||||
"""Validate and normalize a path.
|
||||
|
||||
Args:
|
||||
path: 검증할 경로
|
||||
path: Path to validate
|
||||
|
||||
Returns:
|
||||
/로 시작하는 정규화된 경로
|
||||
Normalized path starting with /
|
||||
|
||||
Raises:
|
||||
ValueError: 경로가 유효하지 않은 경우
|
||||
ValueError: If path is invalid
|
||||
"""
|
||||
path = path or "/"
|
||||
if not path or path.strip() == "":
|
||||
@@ -249,16 +249,16 @@ def _glob_search_files(
|
||||
pattern: str,
|
||||
path: str = "/",
|
||||
) -> str:
|
||||
"""glob 패턴과 일치하는 경로를 찾기 위해 파일 dict를 검색합니다.
|
||||
"""Search files dict for paths matching glob pattern.
|
||||
|
||||
Args:
|
||||
files: 파일 경로에서 FileData로의 딕셔너리.
|
||||
pattern: Glob 패턴 (예: "*.py", "**/*.ts").
|
||||
path: 검색을 시작할 기본 경로.
|
||||
files: Dictionary of file paths to FileData.
|
||||
pattern: Glob pattern (e.g., "*.py", "**/*.ts").
|
||||
path: Base path to search from.
|
||||
|
||||
Returns:
|
||||
수정 시간순(최신순)으로 정렬된, 줄바꿈으로 구분된 파일 경로들.
|
||||
일치하는 항목이 없으면 "No files found"를 반환합니다.
|
||||
Newline-separated file paths, sorted by modification time (most recent first).
|
||||
Returns "No files found" if no matches.
|
||||
|
||||
Example:
|
||||
```python
|
||||
@@ -301,14 +301,14 @@ def _format_grep_results(
|
||||
results: dict[str, list[tuple[int, str]]],
|
||||
output_mode: Literal["files_with_matches", "content", "count"],
|
||||
) -> str:
|
||||
"""출력 모드에 따라 grep 검색 결과를 포맷팅합니다.
|
||||
"""Format grep search results based on output mode.
|
||||
|
||||
Args:
|
||||
results: 파일 경로에서 (line_num, line_content) 튜플 리스트로의 딕셔너리
|
||||
output_mode: 출력 형식 - "files_with_matches", "content", 또는 "count"
|
||||
results: Dictionary mapping file paths to list of (line_num, line_content) tuples
|
||||
output_mode: Output format - "files_with_matches", "content", or "count"
|
||||
|
||||
Returns:
|
||||
포맷팅된 문자열 출력
|
||||
Formatted string output
|
||||
"""
|
||||
if output_mode == "files_with_matches":
|
||||
return "\n".join(sorted(results.keys()))
|
||||
@@ -333,17 +333,17 @@ def _grep_search_files(
|
||||
glob: str | None = None,
|
||||
output_mode: Literal["files_with_matches", "content", "count"] = "files_with_matches",
|
||||
) -> str:
|
||||
"""정규식 패턴에 대해 파일 내용을 검색합니다.
|
||||
"""Search file contents for regex pattern.
|
||||
|
||||
Args:
|
||||
files: 파일 경로에서 FileData로의 딕셔너리.
|
||||
pattern: 검색할 정규식 패턴.
|
||||
path: 검색을 시작할 기본 경로.
|
||||
glob: 파일을 필터링할 선택적 glob 패턴 (예: "*.py").
|
||||
output_mode: 출력 형식 - "files_with_matches", "content", 또는 "count".
|
||||
files: Dictionary of file paths to FileData.
|
||||
pattern: Regex pattern to search for.
|
||||
path: Base path to search from.
|
||||
glob: Optional glob pattern to filter files (e.g., "*.py").
|
||||
output_mode: Output format - "files_with_matches", "content", or "count".
|
||||
|
||||
Returns:
|
||||
포맷팅된 검색 결과. 결과가 없으면 "No matches found"를 반환합니다.
|
||||
Formatted search results. Returns "No matches found" if no results.
|
||||
|
||||
Example:
|
||||
```python
|
||||
@@ -389,11 +389,11 @@ def grep_matches_from_files(
|
||||
path: str | None = None,
|
||||
glob: str | None = None,
|
||||
) -> list[GrepMatch] | str:
|
||||
"""인메모리 파일 매핑에서 구조화된 grep 일치 항목을 반환합니다.
|
||||
"""Return structured grep matches from an in-memory files mapping.
|
||||
|
||||
성공 시 GrepMatch 리스트를 반환하며, 잘못된 입력(예: 잘못된 정규식)의 경우 문자열을 반환합니다.
|
||||
도구 컨텍스트에서 백엔드가 예외를 발생시키지 않고 사용자 대면 에러 메시지를 보존하기 위해,
|
||||
여기서는 의도적으로 예외를 발생시키지 않습니다.
|
||||
Returns a list of GrepMatch on success, or a string for invalid inputs
|
||||
(e.g., invalid regex). We deliberately do not raise here to keep backends
|
||||
non-throwing in tool contexts and preserve user-facing error messages.
|
||||
"""
|
||||
try:
|
||||
regex = re.compile(pattern)
|
||||
@@ -419,7 +419,7 @@ def grep_matches_from_files(
|
||||
|
||||
|
||||
def build_grep_results_dict(matches: list[GrepMatch]) -> dict[str, list[tuple[int, str]]]:
|
||||
"""구조화된 일치 항목을 포맷터가 사용하는 레거시 dict 형태로 그룹화합니다."""
|
||||
"""Group structured matches into the legacy dict form used by formatters."""
|
||||
grouped: dict[str, list[tuple[int, str]]] = {}
|
||||
for m in matches:
|
||||
grouped.setdefault(m["path"], []).append((m["line"], m["text"]))
|
||||
@@ -430,7 +430,7 @@ def format_grep_matches(
|
||||
matches: list[GrepMatch],
|
||||
output_mode: Literal["files_with_matches", "content", "count"],
|
||||
) -> str:
|
||||
"""기존 포맷팅 로직을 사용하여 구조화된 grep 일치 항목을 포맷팅합니다."""
|
||||
"""Format structured grep matches using existing formatting logic."""
|
||||
if not matches:
|
||||
return "No matches found"
|
||||
return _format_grep_results(build_grep_results_dict(matches), output_mode)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Deepagents는 계획(planning), 파일시스템(filesystem), 하위 에이전트(subagents) 기능을 포함합니다."""
|
||||
"""Deepagents come with planning, filesystem, and subagents."""
|
||||
|
||||
from collections.abc import Callable, Sequence
|
||||
from typing import Any
|
||||
@@ -18,19 +18,22 @@ from langgraph.graph.state import CompiledStateGraph
|
||||
from langgraph.store.base import BaseStore
|
||||
from langgraph.types import Checkpointer
|
||||
|
||||
from deepagents.backends import StateBackend
|
||||
from deepagents.backends.protocol import BackendFactory, BackendProtocol
|
||||
from deepagents.middleware.filesystem import FilesystemMiddleware
|
||||
from deepagents.middleware.memory import MemoryMiddleware
|
||||
from deepagents.middleware.patch_tool_calls import PatchToolCallsMiddleware
|
||||
from deepagents.middleware.skills import SkillsMiddleware
|
||||
from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware
|
||||
|
||||
BASE_AGENT_PROMPT = "In order to complete the objective that the user asks of you, you have access to a number of standard tools."
|
||||
|
||||
|
||||
def get_default_model() -> ChatAnthropic:
|
||||
"""Deep Agent를 위한 기본 모델을 반환합니다.
|
||||
"""Get the default model for deep agents.
|
||||
|
||||
Returns:
|
||||
Claude Sonnet 4로 구성된 ChatAnthropic 인스턴스.
|
||||
`ChatAnthropic` instance configured with Claude Sonnet 4.5.
|
||||
"""
|
||||
return ChatAnthropic(
|
||||
model_name="claude-sonnet-4-5-20250929",
|
||||
@@ -45,6 +48,8 @@ def create_deep_agent(
|
||||
system_prompt: str | None = None,
|
||||
middleware: Sequence[AgentMiddleware] = (),
|
||||
subagents: list[SubAgent | CompiledSubAgent] | None = None,
|
||||
skills: list[str] | None = None,
|
||||
memory: list[str] | None = None,
|
||||
response_format: ResponseFormat | None = None,
|
||||
context_schema: type[Any] | None = None,
|
||||
checkpointer: Checkpointer | None = None,
|
||||
@@ -55,40 +60,56 @@ def create_deep_agent(
|
||||
name: str | None = None,
|
||||
cache: BaseCache | None = None,
|
||||
) -> CompiledStateGraph:
|
||||
"""Deep Agent를 생성합니다.
|
||||
"""Create a deep agent.
|
||||
|
||||
이 에이전트는 기본적으로 할 일 목록 작성 도구(write_todos), 7가지 파일 및 실행 도구
|
||||
(ls, read_file, write_file, edit_file, glob, grep, execute), 그리고 하위 에이전트 호출 도구를 가집니다.
|
||||
This agent will by default have access to a tool to write todos (`write_todos`),
|
||||
seven file and execution tools: `ls`, `read_file`, `write_file`, `edit_file`, `glob`, `grep`, `execute`,
|
||||
and a tool to call subagents.
|
||||
|
||||
execute 도구는 백엔드가 SandboxBackendProtocol을 구현한 경우 쉘 명령을 실행할 수 있습니다.
|
||||
샌드박스 백엔드가 아닌 경우 execute 도구는 오류 메시지를 반환합니다.
|
||||
The `execute` tool allows running shell commands if the backend implements `SandboxBackendProtocol`.
|
||||
For non-sandbox backends, the `execute` tool will return an error message.
|
||||
|
||||
Args:
|
||||
model: 사용할 모델. 기본값은 Claude Sonnet 4입니다.
|
||||
tools: 에이전트가 접근할 수 있는 도구들입니다.
|
||||
system_prompt: 에이전트에게 제공할 추가 지침입니다. 시스템 프롬프트에 포함됩니다.
|
||||
middleware: 표준 미들웨어 이후에 적용할 추가 미들웨어입니다.
|
||||
subagents: 사용할 하위 에이전트 목록입니다. 각 하위 에이전트는 다음 키를 가진 딕셔너리여야 합니다:
|
||||
- `name`
|
||||
- `description` (메인 에이전트가 하위 에이전트 호출 여부를 결정할 때 사용)
|
||||
- `prompt` (하위 에이전트의 시스템 프롬프트로 사용)
|
||||
- (선택사항) `tools`
|
||||
- (선택사항) `model` (LanguageModelLike 인스턴스 또는 dict 설정)
|
||||
- (선택사항) `middleware` (List[AgentMiddleware])
|
||||
response_format: 에이전트에 사용할 구조화된 출력 응답 형식입니다.
|
||||
context_schema: Deep Agent의 스키마입니다.
|
||||
checkpointer: 실행 간 에이전트 상태를 유지하기 위한 선택적 체크포인터입니다.
|
||||
store: 영구 저장을 위한 선택적 저장소 (백엔드가 StoreBackend를 사용하는 경우 필수).
|
||||
backend: 파일 저장 및 실행을 위한 선택적 백엔드. Backend 인스턴스 또는
|
||||
`lambda rt: StateBackend(rt)`와 같은 호출 가능한 팩토리를 전달합니다. 실행 지원을 위해서는
|
||||
SandboxBackendProtocol을 구현하는 백엔드를 사용하십시오.
|
||||
interrupt_on: 도구 이름을 인터럽트 설정에 매핑하는 선택적 Dict[str, bool | InterruptOnConfig]입니다.
|
||||
debug: 디버그 모드 활성화 여부. create_agent로 전달됩니다.
|
||||
name: 에이전트의 이름. create_agent로 전달됩니다.
|
||||
cache: 에이전트에 사용할 캐시. create_agent로 전달됩니다.
|
||||
model: The model to use. Defaults to `claude-sonnet-4-5-20250929`.
|
||||
tools: The tools the agent should have access to.
|
||||
system_prompt: The additional instructions the agent should have. Will go in
|
||||
the system prompt.
|
||||
middleware: Additional middleware to apply after standard middleware.
|
||||
subagents: The subagents to use.
|
||||
|
||||
Each subagent should be a `dict` with the following keys:
|
||||
|
||||
- `name`
|
||||
- `description` (used by the main agent to decide whether to call the sub agent)
|
||||
- `prompt` (used as the system prompt in the subagent)
|
||||
- (optional) `tools`
|
||||
- (optional) `model` (either a `LanguageModelLike` instance or `dict` settings)
|
||||
- (optional) `middleware` (list of `AgentMiddleware`)
|
||||
skills: Optional list of skill source paths (e.g., `["/skills/user/", "/skills/project/"]`).
|
||||
|
||||
Paths must be specified using POSIX conventions (forward slashes) and are relative
|
||||
to the backend's root. When using `StateBackend` (default), provide skill files via
|
||||
`invoke(files={...})`. With `FilesystemBackend`, skills are loaded from disk relative
|
||||
to the backend's `root_dir`. Later sources override earlier ones for skills with the
|
||||
same name (last one wins).
|
||||
memory: Optional list of memory file paths (`AGENTS.md` files) to load
|
||||
(e.g., `["/memory/AGENTS.md"]`). Display names are automatically derived from paths.
|
||||
Memory is loaded at agent startup and added into the system prompt.
|
||||
response_format: A structured output response format to use for the agent.
|
||||
context_schema: The schema of the deep agent.
|
||||
checkpointer: Optional `Checkpointer` for persisting agent state between runs.
|
||||
store: Optional store for persistent storage (required if backend uses `StoreBackend`).
|
||||
backend: Optional backend for file storage and execution.
|
||||
|
||||
Pass either a `Backend` instance or a callable factory like `lambda rt: StateBackend(rt)`.
|
||||
For execution support, use a backend that implements `SandboxBackendProtocol`.
|
||||
interrupt_on: Mapping of tool names to interrupt configs.
|
||||
debug: Whether to enable debug mode. Passed through to `create_agent`.
|
||||
name: The name of the agent. Passed through to `create_agent`.
|
||||
cache: The cache to use for the agent. Passed through to `create_agent`.
|
||||
|
||||
Returns:
|
||||
구성된 Deep Agent.
|
||||
A configured deep agent.
|
||||
"""
|
||||
if model is None:
|
||||
model = get_default_model()
|
||||
@@ -107,37 +128,58 @@ def create_deep_agent(
|
||||
trigger = ("tokens", 170000)
|
||||
keep = ("messages", 6)
|
||||
|
||||
deepagent_middleware = [
|
||||
# Build middleware stack for subagents (includes skills if provided)
|
||||
subagent_middleware: list[AgentMiddleware] = [
|
||||
TodoListMiddleware(),
|
||||
FilesystemMiddleware(backend=backend),
|
||||
SubAgentMiddleware(
|
||||
default_model=model,
|
||||
default_tools=tools,
|
||||
subagents=subagents if subagents is not None else [],
|
||||
default_middleware=[
|
||||
TodoListMiddleware(),
|
||||
FilesystemMiddleware(backend=backend),
|
||||
SummarizationMiddleware(
|
||||
model=model,
|
||||
trigger=trigger,
|
||||
keep=keep,
|
||||
trim_tokens_to_summarize=None,
|
||||
),
|
||||
AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
|
||||
PatchToolCallsMiddleware(),
|
||||
],
|
||||
default_interrupt_on=interrupt_on,
|
||||
general_purpose_agent=True,
|
||||
),
|
||||
SummarizationMiddleware(
|
||||
model=model,
|
||||
trigger=trigger,
|
||||
keep=keep,
|
||||
trim_tokens_to_summarize=None,
|
||||
),
|
||||
AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
|
||||
PatchToolCallsMiddleware(),
|
||||
]
|
||||
|
||||
backend = backend if backend is not None else (lambda rt: StateBackend(rt))
|
||||
|
||||
if skills is not None:
|
||||
subagent_middleware.append(SkillsMiddleware(backend=backend, sources=skills))
|
||||
subagent_middleware.extend(
|
||||
[
|
||||
FilesystemMiddleware(backend=backend),
|
||||
SummarizationMiddleware(
|
||||
model=model,
|
||||
trigger=trigger,
|
||||
keep=keep,
|
||||
trim_tokens_to_summarize=None,
|
||||
),
|
||||
AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
|
||||
PatchToolCallsMiddleware(),
|
||||
]
|
||||
)
|
||||
|
||||
# Build main agent middleware stack
|
||||
deepagent_middleware: list[AgentMiddleware] = [
|
||||
TodoListMiddleware(),
|
||||
]
|
||||
if memory is not None:
|
||||
deepagent_middleware.append(MemoryMiddleware(backend=backend, sources=memory))
|
||||
if skills is not None:
|
||||
deepagent_middleware.append(SkillsMiddleware(backend=backend, sources=skills))
|
||||
deepagent_middleware.extend(
|
||||
[
|
||||
FilesystemMiddleware(backend=backend),
|
||||
SubAgentMiddleware(
|
||||
default_model=model,
|
||||
default_tools=tools,
|
||||
subagents=subagents if subagents is not None else [],
|
||||
default_middleware=subagent_middleware,
|
||||
default_interrupt_on=interrupt_on,
|
||||
general_purpose_agent=True,
|
||||
),
|
||||
SummarizationMiddleware(
|
||||
model=model,
|
||||
trigger=trigger,
|
||||
keep=keep,
|
||||
trim_tokens_to_summarize=None,
|
||||
),
|
||||
AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
|
||||
PatchToolCallsMiddleware(),
|
||||
]
|
||||
)
|
||||
if middleware:
|
||||
deepagent_middleware.extend(middleware)
|
||||
if interrupt_on is not None:
|
||||
|
||||
@@ -1,11 +1,15 @@
|
||||
"""Middleware for the DeepAgent."""
|
||||
|
||||
from deepagents.middleware.filesystem import FilesystemMiddleware
|
||||
from deepagents.middleware.memory import MemoryMiddleware
|
||||
from deepagents.middleware.skills import SkillsMiddleware
|
||||
from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware
|
||||
|
||||
__all__ = [
|
||||
"CompiledSubAgent",
|
||||
"FilesystemMiddleware",
|
||||
"MemoryMiddleware",
|
||||
"SkillsMiddleware",
|
||||
"SubAgent",
|
||||
"SubAgentMiddleware",
|
||||
]
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,407 @@
|
||||
"""Middleware for loading agent memory/context from AGENTS.md files.
|
||||
|
||||
This module implements support for the AGENTS.md specification (https://agents.md/),
|
||||
loading memory/context from configurable sources and injecting into the system prompt.
|
||||
|
||||
## Overview
|
||||
|
||||
AGENTS.md files provide project-specific context and instructions to help AI agents
|
||||
work effectively. Unlike skills (which are on-demand workflows), memory is always
|
||||
loaded and provides persistent context.
|
||||
|
||||
## Usage
|
||||
|
||||
```python
|
||||
from deepagents import MemoryMiddleware
|
||||
from deepagents.backends.filesystem import FilesystemBackend
|
||||
|
||||
# Security: FilesystemBackend allows reading/writing from the entire filesystem.
|
||||
# Either ensure the agent is running within a sandbox OR add human-in-the-loop (HIL)
|
||||
# approval to file operations.
|
||||
backend = FilesystemBackend(root_dir="/")
|
||||
|
||||
middleware = MemoryMiddleware(
|
||||
backend=backend,
|
||||
sources=[
|
||||
"~/.deepagents/AGENTS.md",
|
||||
"./.deepagents/AGENTS.md",
|
||||
],
|
||||
)
|
||||
|
||||
agent = create_deep_agent(middleware=[middleware])
|
||||
```
|
||||
|
||||
## Memory Sources
|
||||
|
||||
Sources are simply paths to AGENTS.md files that are loaded in order and combined.
|
||||
Multiple sources are concatenated in order, with all content included.
|
||||
Later sources appear after earlier ones in the combined prompt.
|
||||
|
||||
## File Format
|
||||
|
||||
AGENTS.md files are standard Markdown with no required structure.
|
||||
Common sections include:
|
||||
- Project overview
|
||||
- Build/test commands
|
||||
- Code style guidelines
|
||||
- Architecture notes
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from collections.abc import Awaitable, Callable
|
||||
from typing import TYPE_CHECKING, Annotated, NotRequired, TypedDict
|
||||
|
||||
from langchain.messages import SystemMessage
|
||||
from langchain_core.runnables import RunnableConfig
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from deepagents.backends.protocol import BACKEND_TYPES, BackendProtocol
|
||||
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
AgentState,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
PrivateStateAttr,
|
||||
)
|
||||
from langchain.tools import ToolRuntime
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MemoryState(AgentState):
    """State schema for MemoryMiddleware.

    Attributes:
        memory_contents: Dict mapping source paths to their loaded content.
            Marked as private so it's not included in the final agent state.
    """

    # Mapping of source path -> loaded AGENTS.md content. NotRequired: the key
    # is absent until before_agent/abefore_agent has populated it once, which
    # is how the middleware detects "already loaded" and skips reloading.
    memory_contents: NotRequired[Annotated[dict[str, str], PrivateStateAttr]]
|
||||
|
||||
|
||||
class MemoryStateUpdate(TypedDict):
    """State update for MemoryMiddleware."""

    # Mapping of source path -> loaded content, merged into MemoryState by the
    # before_agent hooks.
    memory_contents: dict[str, str]
|
||||
|
||||
|
||||
# System prompt fragment injected ahead of the agent's own system prompt.
# Single format placeholder: {agent_memory} (the combined AGENTS.md contents).
# Fixes vs. previous revision: "LangChaincode" -> "LangChain code",
# "add a black" -> "add a block", missing space before "<example code...>".
MEMORY_SYSTEM_PROMPT = """<agent_memory>
{agent_memory}
</agent_memory>

<memory_guidelines>
The above <agent_memory> was loaded in from files in your filesystem. As you learn from your interactions with the user, you can save new knowledge by calling the `edit_file` tool.

**Learning from feedback:**
- One of your MAIN PRIORITIES is to learn from your interactions with the user. These learnings can be implicit or explicit. This means that in the future, you will remember this important information.
- When you need to remember something, updating memory must be your FIRST, IMMEDIATE action - before responding to the user, before calling other tools, before doing anything else. Just update memory immediately.
- When user says something is better/worse, capture WHY and encode it as a pattern.
- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions.
- A great opportunity to update your memories is when the user interrupts a tool call and provides feedback. You should update your memories immediately before revising the tool call.
- Look for the underlying principle behind corrections, not just the specific mistake.
- The user might not explicitly ask you to remember something, but if they provide information that is useful for future use, you should update your memories immediately.

**Asking for information:**
- If you lack context to perform an action (e.g. send a Slack DM, requires a user ID/email) you should explicitly ask the user for this information.
- It is preferred for you to ask for information, don't assume anything that you do not know!
- When the user provides information that is useful for future use, you should update your memories immediately.

**When to update memories:**
- When the user explicitly asks you to remember something (e.g., "remember my email", "save this preference")
- When the user describes your role or how you should behave (e.g., "you are a web researcher", "always do X")
- When the user gives feedback on your work - capture what was wrong and how to improve
- When the user provides information required for tool use (e.g., slack channel ID, email addresses)
- When the user provides context useful for future tasks, such as how to use tools, or which actions to take in a particular situation
- When you discover new patterns or preferences (coding styles, conventions, workflows)

**When to NOT update memories:**
- When the information is temporary or transient (e.g., "I'm running late", "I'm on my phone right now")
- When the information is a one-time task request (e.g., "Find me a recipe", "What's 25 * 4?")
- When the information is a simple question that doesn't reveal lasting preferences (e.g., "What day is it?", "Can you explain X?")
- When the information is an acknowledgment or small talk (e.g., "Sounds good!", "Hello", "Thanks for that")
- When the information is stale or irrelevant in future conversations
- Never store API keys, access tokens, passwords, or any other credentials in any file, memory, or system prompt.
- If the user asks where to put API keys or provides an API key, do NOT echo or save it.

**Examples:**
Example 1 (remembering user information):
User: Can you connect to my google account?
Agent: Sure, I'll connect to your google account, what's your google account email?
User: john@example.com
Agent: Let me save this to my memory.
Tool Call: edit_file(...) -> remembers that the user's google account email is john@example.com

Example 2 (remembering implicit user preferences):
User: Can you write me an example for creating a deep agent in LangChain?
Agent: Sure, I'll write you an example for creating a deep agent in LangChain <example code in Python>
User: Can you do this in JavaScript
Agent: Let me save this to my memory.
Tool Call: edit_file(...) -> remembers that the user prefers to get LangChain code examples in JavaScript
Agent: Sure, here is the JavaScript example <example code in JavaScript>

Example 3 (do not remember transient information):
User: I'm going to play basketball tonight so I will be offline for a few hours.
Agent: Okay I'll add a block to your calendar.
Tool Call: create_calendar_event(...) -> just calls a tool, does not commit anything to memory, as it is transient information
</memory_guidelines>
"""
|
||||
|
||||
|
||||
class MemoryMiddleware(AgentMiddleware):
    """Middleware for loading agent memory from AGENTS.md files.

    Loads memory content from configured sources and injects into the system prompt.
    Supports multiple sources that are combined together in source order.

    Args:
        backend: Backend instance or factory function for file operations.
        sources: List of memory file paths to load, in order.
    """

    state_schema = MemoryState

    def __init__(
        self,
        *,
        backend: BACKEND_TYPES,
        sources: list[str],
    ) -> None:
        """Initialize the memory middleware.

        Args:
            backend: Backend instance or factory function that takes runtime
                and returns a backend. Use a factory for StateBackend.
            sources: List of memory file paths to load (e.g., ["~/.deepagents/AGENTS.md",
                "./.deepagents/AGENTS.md"]). Display names are automatically derived
                from the paths. Sources are loaded in order.
        """
        self._backend = backend
        self.sources = sources

    def _get_backend(self, state: MemoryState, runtime: Runtime, config: RunnableConfig) -> BackendProtocol:
        """Resolve backend from instance or factory.

        Args:
            state: Current agent state.
            runtime: Runtime context for factory functions.
            config: Runnable config to pass to backend factory.

        Returns:
            Resolved backend instance.
        """
        if callable(self._backend):
            # Construct an artificial tool runtime so a backend factory can be
            # resolved outside of a real tool call (hence tool_call_id=None).
            tool_runtime = ToolRuntime(
                state=state,
                context=runtime.context,
                stream_writer=runtime.stream_writer,
                store=runtime.store,
                config=config,
                tool_call_id=None,
            )
            return self._backend(tool_runtime)
        return self._backend

    def _format_agent_memory(self, contents: dict[str, str]) -> str:
        """Format memory with locations and contents paired together.

        Args:
            contents: Dict mapping source paths to content.

        Returns:
            Formatted string with location+content pairs wrapped in <agent_memory> tags.
        """
        # Iterate self.sources rather than contents so the rendered order
        # always follows the configured source order.
        sections = [f"{path}\n{contents[path]}" for path in self.sources if contents.get(path)]
        if not sections:
            return MEMORY_SYSTEM_PROMPT.format(agent_memory="(No memory loaded)")
        return MEMORY_SYSTEM_PROMPT.format(agent_memory="\n\n".join(sections))

    @staticmethod
    def _extract_content(response, path: str) -> str | None:
        """Shared error handling and decoding for a single download response.

        Args:
            response: Download response returned by the backend for `path`.
            path: Path that was requested (used in error messages).

        Returns:
            Decoded UTF-8 content, or None if the file is missing or empty.

        Raises:
            ValueError: For any download error other than a missing file.
        """
        if response.error is not None:
            # Memory files are treated as optional. file_not_found is expected
            # and we skip silently to allow graceful degradation.
            if response.error == "file_not_found":
                return None
            # Other errors should be raised
            raise ValueError(f"Failed to download {path}: {response.error}")
        if response.content is not None:
            return response.content.decode("utf-8")
        return None

    async def _load_memory_from_backend(
        self,
        backend: BackendProtocol,
        path: str,
    ) -> str | None:
        """Load memory content from a backend path.

        Args:
            backend: Backend to load from.
            path: Path to the AGENTS.md file.

        Returns:
            File content if found, None otherwise.
        """
        results = await backend.adownload_files([path])
        # Should get exactly one response for one path
        if len(results) != 1:
            raise AssertionError(f"Expected 1 response for path {path}, got {len(results)}")
        return self._extract_content(results[0], path)

    def _load_memory_from_backend_sync(
        self,
        backend: BackendProtocol,
        path: str,
    ) -> str | None:
        """Load memory content from a backend path synchronously.

        Args:
            backend: Backend to load from.
            path: Path to the AGENTS.md file.

        Returns:
            File content if found, None otherwise.
        """
        results = backend.download_files([path])
        # Should get exactly one response for one path
        if len(results) != 1:
            raise AssertionError(f"Expected 1 response for path {path}, got {len(results)}")
        return self._extract_content(results[0], path)

    def before_agent(self, state: MemoryState, runtime: Runtime, config: RunnableConfig) -> MemoryStateUpdate | None:
        """Load memory content before agent execution (synchronous).

        Loads memory from all configured sources and stores in state.
        Only loads if not already present in state.

        Args:
            state: Current agent state.
            runtime: Runtime context.
            config: Runnable config.

        Returns:
            State update with memory_contents populated, or None if memory
            was already loaded.
        """
        # Skip if already loaded
        if "memory_contents" in state:
            return None

        backend = self._get_backend(state, runtime, config)
        contents: dict[str, str] = {}
        for path in self.sources:
            content = self._load_memory_from_backend_sync(backend, path)
            if content:
                contents[path] = content
                logger.debug("Loaded memory from: %s", path)
        return MemoryStateUpdate(memory_contents=contents)

    async def abefore_agent(self, state: MemoryState, runtime: Runtime, config: RunnableConfig) -> MemoryStateUpdate | None:
        """Load memory content before agent execution.

        Async counterpart of before_agent: loads memory from all configured
        sources and stores in state, only if not already present.

        Args:
            state: Current agent state.
            runtime: Runtime context.
            config: Runnable config.

        Returns:
            State update with memory_contents populated, or None if memory
            was already loaded.
        """
        # Skip if already loaded
        if "memory_contents" in state:
            return None

        backend = self._get_backend(state, runtime, config)
        contents: dict[str, str] = {}
        for path in self.sources:
            content = await self._load_memory_from_backend(backend, path)
            if content:
                contents[path] = content
                logger.debug("Loaded memory from: %s", path)
        return MemoryStateUpdate(memory_contents=contents)

    def modify_request(self, request: ModelRequest) -> ModelRequest:
        """Inject memory content into the system prompt.

        Args:
            request: Model request to modify.

        Returns:
            Modified request with memory injected into system prompt.
        """
        contents = request.state.get("memory_contents", {})
        agent_memory = self._format_agent_memory(contents)
        # Memory is prepended so the agent's own system prompt still follows.
        if request.system_prompt:
            system_prompt = agent_memory + "\n\n" + request.system_prompt
        else:
            system_prompt = agent_memory
        return request.override(system_message=SystemMessage(system_prompt))

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        """Wrap model call to inject memory into system prompt.

        Args:
            request: Model request being processed.
            handler: Handler function to call with modified request.

        Returns:
            Model response from handler.
        """
        return handler(self.modify_request(request))

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        """Async wrap model call to inject memory into system prompt.

        Args:
            request: Model request being processed.
            handler: Async handler function to call with modified request.

        Returns:
            Model response from handler.
        """
        return await handler(self.modify_request(request))
|
||||
@@ -9,10 +9,10 @@ from langgraph.types import Overwrite
|
||||
|
||||
|
||||
class PatchToolCallsMiddleware(AgentMiddleware):
|
||||
"""메시지 기록에서 댕글링(dangling) 도구 호출을 패치하는 미들웨어."""
|
||||
"""Middleware to patch dangling tool calls in the messages history."""
|
||||
|
||||
def before_agent(self, state: AgentState, runtime: Runtime[Any]) -> dict[str, Any] | None: # noqa: ARG002
|
||||
"""에이전트가 실행되기 전에 AIMessage의 댕글링 도구 호출을 처리합니다."""
|
||||
"""Before the agent runs, handle dangling tool calls from any AIMessage."""
|
||||
messages = state["messages"]
|
||||
if not messages or len(messages) == 0:
|
||||
return None
|
||||
@@ -30,8 +30,8 @@ class PatchToolCallsMiddleware(AgentMiddleware):
|
||||
if corresponding_tool_msg is None:
|
||||
# We have a dangling tool call which needs a ToolMessage
|
||||
tool_msg = (
|
||||
f"도구 호출 {tool_call['name']} (ID: {tool_call['id']})이 취소되었습니다 - "
|
||||
"완료되기 전에 다른 메시지가 도착했습니다."
|
||||
f"Tool call {tool_call['name']} with id {tool_call['id']} was "
|
||||
"cancelled - another message came in before it could be completed."
|
||||
)
|
||||
patched_messages.append(
|
||||
ToolMessage(
|
||||
|
||||
@@ -0,0 +1,695 @@
|
||||
"""Skills middleware for loading and exposing agent skills to the system prompt.
|
||||
|
||||
This module implements Anthropic's agent skills pattern with progressive disclosure,
|
||||
loading skills from backend storage via configurable sources.
|
||||
|
||||
## Architecture
|
||||
|
||||
Skills are loaded from one or more **sources** - paths in a backend where skills are
|
||||
organized. Sources are loaded in order, with later sources overriding earlier ones
|
||||
when skills have the same name (last one wins). This enables layering: base -> user
|
||||
-> project -> team skills.
|
||||
|
||||
The middleware uses backend APIs exclusively (no direct filesystem access), making it
|
||||
portable across different storage backends (filesystem, state, remote storage, etc.).
|
||||
|
||||
For StateBackend (ephemeral/in-memory), use a factory function:
|
||||
```python
|
||||
SkillsMiddleware(backend=lambda rt: StateBackend(rt), ...)
|
||||
```
|
||||
|
||||
## Skill Structure
|
||||
|
||||
Each skill is a directory containing a SKILL.md file with YAML frontmatter:
|
||||
|
||||
```
|
||||
/skills/user/web-research/
|
||||
├── SKILL.md # Required: YAML frontmatter + markdown instructions
|
||||
└── helper.py # Optional: supporting files
|
||||
```
|
||||
|
||||
SKILL.md format:
|
||||
```markdown
|
||||
---
|
||||
name: web-research
|
||||
description: Structured approach to conducting thorough web research
|
||||
license: MIT
|
||||
---
|
||||
|
||||
# Web Research Skill
|
||||
|
||||
## When to Use
|
||||
- User asks you to research a topic
|
||||
...
|
||||
```
|
||||
|
||||
## Skill Metadata (SkillMetadata)
|
||||
|
||||
Parsed from YAML frontmatter per Agent Skills specification:
|
||||
- `name`: Skill identifier (max 64 chars, lowercase alphanumeric and hyphens)
|
||||
- `description`: What the skill does (max 1024 chars)
|
||||
- `path`: Backend path to the SKILL.md file
|
||||
- Optional: `license`, `compatibility`, `metadata`, `allowed_tools`
|
||||
|
||||
## Sources
|
||||
|
||||
Sources are simply paths to skill directories in the backend. The source name is
|
||||
derived from the last component of the path (e.g., "/skills/user/" -> "user").
|
||||
|
||||
Example sources:
|
||||
```python
|
||||
[
|
||||
"/skills/user/",
|
||||
"/skills/project/"
|
||||
]
|
||||
```
|
||||
|
||||
## Path Conventions
|
||||
|
||||
All paths use POSIX conventions (forward slashes) via `PurePosixPath`:
|
||||
- Backend paths: "/skills/user/web-research/SKILL.md"
|
||||
- Virtual, platform-independent
|
||||
- Backends handle platform-specific conversions as needed
|
||||
|
||||
## Usage
|
||||
|
||||
```python
|
||||
from deepagents.backends.state import StateBackend
|
||||
from deepagents.middleware.skills import SkillsMiddleware
|
||||
|
||||
middleware = SkillsMiddleware(
|
||||
backend=my_backend,
|
||||
sources=[
|
||||
"/skills/base/",
|
||||
"/skills/user/",
|
||||
"/skills/project/",
|
||||
],
|
||||
)
|
||||
```
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
from pathlib import PurePosixPath
|
||||
from typing import TYPE_CHECKING, Annotated
|
||||
|
||||
import yaml
|
||||
from langchain.agents.middleware.types import PrivateStateAttr
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from deepagents.backends.protocol import BACKEND_TYPES, BackendProtocol
|
||||
|
||||
from collections.abc import Awaitable, Callable
|
||||
from typing import NotRequired, TypedDict
|
||||
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
AgentState,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
)
|
||||
from langchain_core.runnables import RunnableConfig
|
||||
from langgraph.prebuilt import ToolRuntime
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Security: Maximum size for SKILL.md files to prevent DoS attacks (10MB).
# Oversized files are rejected by _parse_skill_metadata.
MAX_SKILL_FILE_SIZE = 10 * 1024 * 1024

# Agent Skills specification constraints (https://agentskills.io/specification).
# Name-length violations only warn; over-long descriptions are truncated.
MAX_SKILL_NAME_LENGTH = 64
MAX_SKILL_DESCRIPTION_LENGTH = 1024
|
||||
|
||||
|
||||
class SkillMetadata(TypedDict):
    """Metadata for a skill per Agent Skills specification (https://agentskills.io/specification).

    Instances are produced by _parse_skill_metadata from a SKILL.md file's
    YAML frontmatter.
    """

    name: str
    """Skill identifier (max 64 chars, lowercase alphanumeric and hyphens)."""

    description: str
    """What the skill does (max 1024 chars; truncated if longer)."""

    path: str
    """Path to the SKILL.md file."""

    license: str | None
    """License name or reference to bundled license file."""

    compatibility: str | None
    """Environment requirements (max 500 chars)."""

    metadata: dict[str, str]
    """Arbitrary key-value mapping for additional metadata."""

    allowed_tools: list[str]
    """Pre-approved tools, parsed from the space-delimited `allowed-tools` field. (Experimental)"""
|
||||
|
||||
|
||||
class SkillsState(AgentState):
    """State for the skills middleware."""

    # Private (PrivateStateAttr) so it is not exposed in the final agent
    # state; NotRequired because it is absent until skills have been loaded.
    skills_metadata: NotRequired[Annotated[list[SkillMetadata], PrivateStateAttr]]
    """List of loaded skill metadata from all configured sources."""
|
||||
|
||||
|
||||
class SkillsStateUpdate(TypedDict):
    """State update for the skills middleware."""

    skills_metadata: list[SkillMetadata]
    """List of loaded skill metadata to merge into state."""
|
||||
|
||||
|
||||
def _validate_skill_name(name: str, directory_name: str) -> tuple[bool, str]:
|
||||
"""Validate skill name per Agent Skills specification.
|
||||
|
||||
Requirements per spec:
|
||||
- Max 64 characters
|
||||
- Lowercase alphanumeric and hyphens only (a-z, 0-9, -)
|
||||
- Cannot start or end with hyphen
|
||||
- No consecutive hyphens
|
||||
- Must match parent directory name
|
||||
|
||||
Args:
|
||||
name: Skill name from YAML frontmatter
|
||||
directory_name: Parent directory name
|
||||
|
||||
Returns:
|
||||
(is_valid, error_message) tuple. Error message is empty if valid.
|
||||
"""
|
||||
if not name:
|
||||
return False, "name is required"
|
||||
if len(name) > MAX_SKILL_NAME_LENGTH:
|
||||
return False, "name exceeds 64 characters"
|
||||
# Pattern: lowercase alphanumeric, single hyphens between segments, no start/end hyphen
|
||||
if not re.match(r"^[a-z0-9]+(-[a-z0-9]+)*$", name):
|
||||
return False, "name must be lowercase alphanumeric with single hyphens only"
|
||||
if name != directory_name:
|
||||
return False, f"name '{name}' must match directory name '{directory_name}'"
|
||||
return True, ""
|
||||
|
||||
|
||||
def _clean_optional_field(value: object) -> str | None:
    """Normalize an optional frontmatter scalar to a stripped string or None.

    YAML can yield None (empty value, e.g. "license:") or non-string scalars;
    coerce through str() instead of calling .strip() on an arbitrary object,
    which previously raised AttributeError for null values.
    """
    if value is None:
        return None
    text = str(value).strip()
    return text or None


def _parse_skill_metadata(
    content: str,
    skill_path: str,
    directory_name: str,
) -> SkillMetadata | None:
    """Parse YAML frontmatter from SKILL.md content.

    Extracts metadata per Agent Skills specification from YAML frontmatter
    delimited by --- markers at the start of the content.

    Args:
        content: Content of the SKILL.md file
        skill_path: Path to the SKILL.md file (for error messages and metadata)
        directory_name: Name of the parent directory containing the skill

    Returns:
        SkillMetadata if parsing succeeds, None if parsing fails or validation
        errors occur
    """
    if len(content) > MAX_SKILL_FILE_SIZE:
        logger.warning("Skipping %s: content too large (%d bytes)", skill_path, len(content))
        return None

    # Match YAML frontmatter between --- delimiters
    frontmatter_pattern = r"^---\s*\n(.*?)\n---\s*\n"
    match = re.match(frontmatter_pattern, content, re.DOTALL)
    if not match:
        logger.warning("Skipping %s: no valid YAML frontmatter found", skill_path)
        return None

    # Parse YAML using safe_load for proper nested structure support
    try:
        frontmatter_data = yaml.safe_load(match.group(1))
    except yaml.YAMLError as e:
        logger.warning("Invalid YAML in %s: %s", skill_path, e)
        return None

    if not isinstance(frontmatter_data, dict):
        logger.warning("Skipping %s: frontmatter is not a mapping", skill_path)
        return None

    # Validate required fields
    name = frontmatter_data.get("name")
    description = frontmatter_data.get("description")
    if not name or not description:
        logger.warning("Skipping %s: missing required 'name' or 'description'", skill_path)
        return None

    # Validate name format per spec (warn but continue loading for backwards compatibility)
    is_valid, error = _validate_skill_name(str(name), directory_name)
    if not is_valid:
        logger.warning(
            "Skill '%s' in %s does not follow Agent Skills specification: %s. Consider renaming for spec compliance.",
            name,
            skill_path,
            error,
        )

    # Validate description length per spec (max 1024 chars)
    description_str = str(description).strip()
    if len(description_str) > MAX_SKILL_DESCRIPTION_LENGTH:
        logger.warning(
            "Description exceeds %d characters in %s, truncating",
            MAX_SKILL_DESCRIPTION_LENGTH,
            skill_path,
        )
        description_str = description_str[:MAX_SKILL_DESCRIPTION_LENGTH]

    # Per spec, allowed-tools is a space-delimited string; tolerate a YAML
    # list as well instead of crashing on .split().
    raw_tools = frontmatter_data.get("allowed-tools")
    if isinstance(raw_tools, str) and raw_tools:
        allowed_tools = raw_tools.split(" ")
    elif isinstance(raw_tools, list):
        allowed_tools = [str(tool) for tool in raw_tools]
    else:
        allowed_tools = []

    # Guard against "metadata:" being null or a non-mapping in the YAML.
    metadata = frontmatter_data.get("metadata")
    if not isinstance(metadata, dict):
        metadata = {}

    return SkillMetadata(
        name=str(name),
        description=description_str,
        path=skill_path,
        metadata=metadata,
        license=_clean_optional_field(frontmatter_data.get("license")),
        compatibility=_clean_optional_field(frontmatter_data.get("compatibility")),
        allowed_tools=allowed_tools,
    )
|
||||
|
||||
|
||||
def _list_skills(backend: BackendProtocol, source_path: str) -> list[SkillMetadata]:
    """List all skills from a backend source.

    Every subdirectory of `source_path` that holds a parseable SKILL.md
    contributes one entry to the result; directories without one are skipped.

    Expected structure:
        source_path/
        ├── skill-name/
        │   ├── SKILL.md     # Required
        │   └── helper.py    # Optional

    Args:
        backend: Backend instance to use for file operations
        source_path: Path to the skills directory in the backend

    Returns:
        List of skill metadata from successfully parsed SKILL.md files
    """
    # Candidate skill directories: the immediate subdirectories of the source.
    dir_paths = [entry["path"] for entry in backend.ls_info(source_path) if entry.get("is_dir")]
    if not dir_paths:
        return []

    # Pair each directory with the SKILL.md path it is expected to contain,
    # using PurePosixPath for safe, standardized path joining.
    md_paths = [str(PurePosixPath(dir_path) / "SKILL.md") for dir_path in dir_paths]
    responses = backend.download_files(md_paths)

    found: list[SkillMetadata] = []
    for dir_path, md_path, response in zip(dir_paths, md_paths, responses, strict=True):
        if response.error:
            # Directory has no SKILL.md, so it is not a skill; skip it.
            continue
        if response.content is None:
            logger.warning("Downloaded skill file %s has no content", md_path)
            continue
        try:
            text = response.content.decode("utf-8")
        except UnicodeDecodeError as e:
            logger.warning("Error decoding %s: %s", md_path, e)
            continue
        parsed = _parse_skill_metadata(
            content=text,
            skill_path=md_path,
            directory_name=PurePosixPath(dir_path).name,
        )
        if parsed:
            found.append(parsed)
    return found
|
||||
|
||||
|
||||
async def _alist_skills(backend: BackendProtocol, source_path: str) -> list[SkillMetadata]:
    """List all skills from a backend source (async version).

    Every subdirectory of `source_path` that holds a parseable SKILL.md
    contributes one entry to the result; directories without one are skipped.

    Expected structure:
        source_path/
        ├── skill-name/
        │   ├── SKILL.md     # Required
        │   └── helper.py    # Optional

    Args:
        backend: Backend instance to use for file operations
        source_path: Path to the skills directory in the backend

    Returns:
        List of skill metadata from successfully parsed SKILL.md files
    """
    # Candidate skill directories: the immediate subdirectories of the source.
    entries = await backend.als_info(source_path)
    dir_paths = [entry["path"] for entry in entries if entry.get("is_dir")]
    if not dir_paths:
        return []

    # Pair each directory with the SKILL.md path it is expected to contain,
    # using PurePosixPath for safe, standardized path joining.
    md_paths = [str(PurePosixPath(dir_path) / "SKILL.md") for dir_path in dir_paths]
    responses = await backend.adownload_files(md_paths)

    found: list[SkillMetadata] = []
    for dir_path, md_path, response in zip(dir_paths, md_paths, responses, strict=True):
        if response.error:
            # Directory has no SKILL.md, so it is not a skill; skip it.
            continue
        if response.content is None:
            logger.warning("Downloaded skill file %s has no content", md_path)
            continue
        try:
            text = response.content.decode("utf-8")
        except UnicodeDecodeError as e:
            logger.warning("Error decoding %s: %s", md_path, e)
            continue
        parsed = _parse_skill_metadata(
            content=text,
            skill_path=md_path,
            directory_name=PurePosixPath(dir_path).name,
        )
        if parsed:
            found.append(parsed)
    return found
|
||||
|
||||
|
||||
SKILLS_SYSTEM_PROMPT = """
|
||||
|
||||
## Skills System
|
||||
|
||||
You have access to a skills library that provides specialized capabilities and domain knowledge.
|
||||
|
||||
{skills_locations}
|
||||
|
||||
**Available Skills:**
|
||||
|
||||
{skills_list}
|
||||
|
||||
**How to Use Skills (Progressive Disclosure):**
|
||||
|
||||
Skills follow a **progressive disclosure** pattern - you see their name and description above, but only read full instructions when needed:
|
||||
|
||||
1. **Recognize when a skill applies**: Check if the user's task matches a skill's description
|
||||
2. **Read the skill's full instructions**: Use the path shown in the skill list above
|
||||
3. **Follow the skill's instructions**: SKILL.md contains step-by-step workflows, best practices, and examples
|
||||
4. **Access supporting files**: Skills may include helper scripts, configs, or reference docs - use absolute paths
|
||||
|
||||
**When to Use Skills:**
|
||||
- User's request matches a skill's domain (e.g., "research X" -> web-research skill)
|
||||
- You need specialized knowledge or structured workflows
|
||||
- A skill provides proven patterns for complex tasks
|
||||
|
||||
**Executing Skill Scripts:**
|
||||
Skills may contain Python scripts or other executable files. Always use absolute paths from the skill list.
|
||||
|
||||
**Example Workflow:**
|
||||
|
||||
User: "Can you research the latest developments in quantum computing?"
|
||||
|
||||
1. Check available skills -> See "web-research" skill with its path
|
||||
2. Read the skill using the path shown
|
||||
3. Follow the skill's research workflow (search -> organize -> synthesize)
|
||||
4. Use any helper scripts with absolute paths
|
||||
|
||||
Remember: Skills make you more capable and consistent. When in doubt, check if a skill exists for the task!
|
||||
"""
|
||||
|
||||
|
||||
class SkillsMiddleware(AgentMiddleware):
|
||||
"""Middleware for loading and exposing agent skills to the system prompt.
|
||||
|
||||
Loads skills from backend sources and injects them into the system prompt
|
||||
using progressive disclosure (metadata first, full content on demand).
|
||||
|
||||
Skills are loaded in source order with later sources overriding earlier ones.
|
||||
|
||||
Example:
|
||||
```python
|
||||
from deepagents.backends.filesystem import FilesystemBackend
|
||||
|
||||
backend = FilesystemBackend(root_dir="/path/to/skills")
|
||||
middleware = SkillsMiddleware(
|
||||
backend=backend,
|
||||
sources=[
|
||||
"/path/to/skills/user/",
|
||||
"/path/to/skills/project/",
|
||||
],
|
||||
)
|
||||
```
|
||||
|
||||
Args:
|
||||
backend: Backend instance for file operations
|
||||
sources: List of skill source paths. Source names are derived from the last path component.
|
||||
"""
|
||||
|
||||
state_schema = SkillsState
|
||||
|
||||
def __init__(self, *, backend: BACKEND_TYPES, sources: list[str]) -> None:
|
||||
"""Initialize the skills middleware.
|
||||
|
||||
Args:
|
||||
backend: Backend instance or factory function that takes runtime and returns a backend.
|
||||
Use a factory for StateBackend: `lambda rt: StateBackend(rt)`
|
||||
sources: List of skill source paths (e.g., ["/skills/user/", "/skills/project/"]).
|
||||
"""
|
||||
self._backend = backend
|
||||
self.sources = sources
|
||||
self.system_prompt_template = SKILLS_SYSTEM_PROMPT
|
||||
|
||||
def _get_backend(self, state: SkillsState, runtime: Runtime, config: RunnableConfig) -> BackendProtocol:
|
||||
"""Resolve backend from instance or factory.
|
||||
|
||||
Args:
|
||||
state: Current agent state.
|
||||
runtime: Runtime context for factory functions.
|
||||
config: Runnable config to pass to backend factory.
|
||||
|
||||
Returns:
|
||||
Resolved backend instance
|
||||
"""
|
||||
if callable(self._backend):
|
||||
# Construct an artificial tool runtime to resolve backend factory
|
||||
tool_runtime = ToolRuntime(
|
||||
state=state,
|
||||
context=runtime.context,
|
||||
stream_writer=runtime.stream_writer,
|
||||
store=runtime.store,
|
||||
config=config,
|
||||
tool_call_id=None,
|
||||
)
|
||||
backend = self._backend(tool_runtime)
|
||||
if backend is None:
|
||||
raise AssertionError("SkillsMiddleware requires a valid backend instance")
|
||||
return backend
|
||||
|
||||
return self._backend
|
||||
|
||||
def _format_skills_locations(self) -> str:
|
||||
"""Format skills locations for display in system prompt."""
|
||||
locations = []
|
||||
for i, source_path in enumerate(self.sources):
|
||||
name = PurePosixPath(source_path.rstrip("/")).name.capitalize()
|
||||
suffix = " (higher priority)" if i == len(self.sources) - 1 else ""
|
||||
locations.append(f"**{name} Skills**: `{source_path}`{suffix}")
|
||||
return "\n".join(locations)
|
||||
|
||||
def _format_skills_list(self, skills: list[SkillMetadata]) -> str:
|
||||
"""Format skills metadata for display in system prompt."""
|
||||
if not skills:
|
||||
paths = [f"{source_path}" for source_path in self.sources]
|
||||
return f"(No skills available yet. You can create skills in {' or '.join(paths)})"
|
||||
|
||||
lines = []
|
||||
for skill in skills:
|
||||
lines.append(f"- **{skill['name']}**: {skill['description']}")
|
||||
lines.append(f" -> Read `{skill['path']}` for full instructions")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
def modify_request(self, request: ModelRequest) -> ModelRequest:
|
||||
"""Inject skills documentation into a model request's system prompt.
|
||||
|
||||
Args:
|
||||
request: Model request to modify
|
||||
|
||||
Returns:
|
||||
New model request with skills documentation injected into system prompt
|
||||
"""
|
||||
skills_metadata = request.state.get("skills_metadata", [])
|
||||
skills_locations = self._format_skills_locations()
|
||||
skills_list = self._format_skills_list(skills_metadata)
|
||||
|
||||
skills_section = self.system_prompt_template.format(
|
||||
skills_locations=skills_locations,
|
||||
skills_list=skills_list,
|
||||
)
|
||||
|
||||
if request.system_prompt:
|
||||
system_prompt = request.system_prompt + "\n\n" + skills_section
|
||||
else:
|
||||
system_prompt = skills_section
|
||||
|
||||
return request.override(system_prompt=system_prompt)
|
||||
|
||||
def before_agent(self, state: SkillsState, runtime: Runtime, config: RunnableConfig) -> SkillsStateUpdate | None:
|
||||
"""Load skills metadata before agent execution (synchronous).
|
||||
|
||||
Runs before each agent interaction to discover available skills from all
|
||||
configured sources. Re-loads on every call to capture any changes.
|
||||
|
||||
Skills are loaded in source order with later sources overriding
|
||||
earlier ones if they contain skills with the same name (last one wins).
|
||||
|
||||
Args:
|
||||
state: Current agent state.
|
||||
runtime: Runtime context.
|
||||
config: Runnable config.
|
||||
|
||||
Returns:
|
||||
State update with skills_metadata populated, or None if already present
|
||||
"""
|
||||
# Skip if skills_metadata is already present in state (even if empty)
|
||||
if "skills_metadata" in state:
|
||||
return None
|
||||
|
||||
# Resolve backend (supports both direct instances and factory functions)
|
||||
backend = self._get_backend(state, runtime, config)
|
||||
all_skills: dict[str, SkillMetadata] = {}
|
||||
|
||||
# Load skills from each source in order
|
||||
# Later sources override earlier ones (last one wins)
|
||||
for source_path in self.sources:
|
||||
source_skills = _list_skills(backend, source_path)
|
||||
for skill in source_skills:
|
||||
all_skills[skill["name"]] = skill
|
||||
|
||||
skills = list(all_skills.values())
|
||||
return SkillsStateUpdate(skills_metadata=skills)
|
||||
|
||||
async def abefore_agent(self, state: SkillsState, runtime: Runtime, config: RunnableConfig) -> SkillsStateUpdate | None:
|
||||
"""Load skills metadata before agent execution (async).
|
||||
|
||||
Runs before each agent interaction to discover available skills from all
|
||||
configured sources. Re-loads on every call to capture any changes.
|
||||
|
||||
Skills are loaded in source order with later sources overriding
|
||||
earlier ones if they contain skills with the same name (last one wins).
|
||||
|
||||
Args:
|
||||
state: Current agent state.
|
||||
runtime: Runtime context.
|
||||
config: Runnable config.
|
||||
|
||||
Returns:
|
||||
State update with skills_metadata populated, or None if already present
|
||||
"""
|
||||
# Skip if skills_metadata is already present in state (even if empty)
|
||||
if "skills_metadata" in state:
|
||||
return None
|
||||
|
||||
# Resolve backend (supports both direct instances and factory functions)
|
||||
backend = self._get_backend(state, runtime, config)
|
||||
all_skills: dict[str, SkillMetadata] = {}
|
||||
|
||||
# Load skills from each source in order
|
||||
# Later sources override earlier ones (last one wins)
|
||||
for source_path in self.sources:
|
||||
source_skills = await _alist_skills(backend, source_path)
|
||||
for skill in source_skills:
|
||||
all_skills[skill["name"]] = skill
|
||||
|
||||
skills = list(all_skills.values())
|
||||
return SkillsStateUpdate(skills_metadata=skills)
|
||||
|
||||
def wrap_model_call(
|
||||
self,
|
||||
request: ModelRequest,
|
||||
handler: Callable[[ModelRequest], ModelResponse],
|
||||
) -> ModelResponse:
|
||||
"""Inject skills documentation into the system prompt.
|
||||
|
||||
Args:
|
||||
request: Model request being processed
|
||||
handler: Handler function to call with modified request
|
||||
|
||||
Returns:
|
||||
Model response from handler
|
||||
"""
|
||||
modified_request = self.modify_request(request)
|
||||
return handler(modified_request)
|
||||
|
||||
async def awrap_model_call(
|
||||
self,
|
||||
request: ModelRequest,
|
||||
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
|
||||
) -> ModelResponse:
|
||||
"""Inject skills documentation into the system prompt (async version).
|
||||
|
||||
Args:
|
||||
request: Model request being processed
|
||||
handler: Async handler function to call with modified request
|
||||
|
||||
Returns:
|
||||
Model response from handler
|
||||
"""
|
||||
modified_request = self.modify_request(request)
|
||||
return await handler(modified_request)
|
||||
|
||||
|
||||
__all__ = ["SkillMetadata", "SkillsMiddleware"]
|
||||
@@ -15,50 +15,50 @@ from langgraph.types import Command
|
||||
|
||||
|
||||
class SubAgent(TypedDict):
|
||||
"""에이전트에 대한 사양(Specification)입니다.
|
||||
"""Specification for an agent.
|
||||
|
||||
사용자 정의 에이전트를 지정할 때, `SubAgentMiddleware`의 `default_middleware`가
|
||||
먼저 적용되고, 그 다음에 이 사양에 지정된 `middleware`가 적용됩니다.
|
||||
기본값을 제외하고 사용자 정의 미들웨어만 사용하려면, `SubAgentMiddleware`에
|
||||
`default_middleware=[]`를 전달하십시오.
|
||||
When specifying custom agents, the `default_middleware` from `SubAgentMiddleware`
|
||||
will be applied first, followed by any `middleware` specified in this spec.
|
||||
To use only custom middleware without the defaults, pass `default_middleware=[]`
|
||||
to `SubAgentMiddleware`.
|
||||
"""
|
||||
|
||||
name: str
|
||||
"""에이전트의 이름."""
|
||||
"""The name of the agent."""
|
||||
|
||||
description: str
|
||||
"""에이전트의 설명."""
|
||||
"""The description of the agent."""
|
||||
|
||||
system_prompt: str
|
||||
"""에이전트에 사용할 시스템 프롬프트."""
|
||||
"""The system prompt to use for the agent."""
|
||||
|
||||
tools: Sequence[BaseTool | Callable | dict[str, Any]]
|
||||
"""에이전트에 사용할 도구들."""
|
||||
"""The tools to use for the agent."""
|
||||
|
||||
model: NotRequired[str | BaseChatModel]
|
||||
"""에이전트의 모델. 기본값은 `default_model`입니다."""
|
||||
"""The model for the agent. Defaults to `default_model`."""
|
||||
|
||||
middleware: NotRequired[list[AgentMiddleware]]
|
||||
"""`default_middleware` 뒤에 추가할 추가 미들웨어."""
|
||||
"""Additional middleware to append after `default_middleware`."""
|
||||
|
||||
interrupt_on: NotRequired[dict[str, bool | InterruptOnConfig]]
|
||||
"""에이전트에 사용할 도구 설정."""
|
||||
"""The tool configs to use for the agent."""
|
||||
|
||||
|
||||
class CompiledSubAgent(TypedDict):
|
||||
"""미리 컴파일된 에이전트 사양."""
|
||||
"""A pre-compiled agent spec."""
|
||||
|
||||
name: str
|
||||
"""에이전트의 이름."""
|
||||
"""The name of the agent."""
|
||||
|
||||
description: str
|
||||
"""에이전트의 설명."""
|
||||
"""The description of the agent."""
|
||||
|
||||
runnable: Runnable
|
||||
"""에이전트에 사용할 Runnable."""
|
||||
"""The Runnable to use for the agent."""
|
||||
|
||||
|
||||
DEFAULT_SUBAGENT_PROMPT = "사용자가 요청하는 목표를 완료하기 위해, 당신은 여러 표준 도구에 접근할 수 있습니다."
|
||||
DEFAULT_SUBAGENT_PROMPT = "In order to complete the objective that the user asks of you, you have access to a number of standard tools."
|
||||
|
||||
# State keys that are excluded when passing state to subagents and when returning
|
||||
# updates from subagents.
|
||||
@@ -68,23 +68,23 @@ DEFAULT_SUBAGENT_PROMPT = "사용자가 요청하는 목표를 완료하기 위
|
||||
# and no clear meaning for returning them from a subagent to the main agent.
|
||||
_EXCLUDED_STATE_KEYS = {"messages", "todos", "structured_response"}
|
||||
|
||||
TASK_TOOL_DESCRIPTION = """격리된 컨텍스트 창(isolated context windows)을 가진 복잡하고 다단계적인 독립 작업을 처리하기 위해 일회성(ephemeral) 서브 에이전트를 실행합니다.
|
||||
TASK_TOOL_DESCRIPTION = """Launch an ephemeral subagent to handle complex, multi-step independent tasks with isolated context windows.
|
||||
|
||||
사용 가능한 에이전트 유형과 그들이 접근할 수 있는 도구:
|
||||
Available agent types and the tools they have access to:
|
||||
{available_agents}
|
||||
|
||||
Task 도구를 사용할 때는 subagent_type 매개변수를 지정하여 사용할 에이전트 유형을 선택해야 합니다.
|
||||
When using the Task tool, you must specify a subagent_type parameter to select which agent type to use.
|
||||
|
||||
## 사용 참고 사항:
|
||||
1. 성능을 극대화하기 위해 가능한 경우 여러 에이전트를 동시에(concurrently) 실행하십시오. 이를 위해 다중 도구 사용(multiple tool uses)이 포함된 단일 메시지를 사용하십시오.
|
||||
2. 에이전트가 완료되면 단일 메시지를 반환합니다. 에이전트가 반환한 결과는 사용자에게 보이지 않습니다. 사용자에게 결과를 보여주려면 결과에 대한 간결한 요약이 담긴 텍스트 메시지를 사용자에게 보내야 합니다.
|
||||
3. 각 에이전트 호출은 상태비저장(stateless)입니다. 서브 에이전트에게 추가 메시지를 보낼 수 없으며, 서브 에이전트도 최종 보고서 이외에는 당신과 통신할 수 없습니다. 따라서 프롬프트에는 에이전트가 자율적으로 수행해야 할 작업에 대한 매우 자세한 설명이 포함되어야 하며, 에이전트가 최종적이고 유일한 메시지로 어떤 정보를 반환해야 하는지 정확히 지정해야 합니다.
|
||||
4. 에이전트의 출력은 일반적으로 신뢰할 수 있어야 합니다.
|
||||
5. 에이전트는 사용자의 의도를 알지 못하므로 콘텐츠 생성, 분석 수행, 또는 단순 연구(검색, 파일 읽기, 웹 가져오기 등) 중 무엇을 수행해야 하는지 명확하게 알려주십시오.
|
||||
6. 에이전트 설명에 선제적으로(proactively) 사용해야 한다고 언급되어 있다면, 사용자가 먼저 요청하지 않아도 최선을 다해 사용해 보십시오. 판단력을 발휘하십시오.
|
||||
7. 범용(general-purpose) 에이전트만 제공되는 경우 모든 작업에 해당 에이전트를 사용해야 합니다. 메인 에이전트와 동일한 모든 기능을 갖추고 있으므로, 컨텍스트와 토큰 사용을 격리하고 특정하고 복잡한 작업을 완료하는 데 매우 적합합니다.
|
||||
## Usage notes:
|
||||
1. Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses
|
||||
2. When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. To show the user the result, you should send a text message back to the user with a concise summary of the result.
|
||||
3. Each agent invocation is stateless. You will not be able to send additional messages to the agent, nor will the agent be able to communicate with you outside of its final report. Therefore, your prompt should contain a highly detailed task description for the agent to perform autonomously and you should specify exactly what information the agent should return back to you in its final and only message to you.
|
||||
4. The agent's outputs should generally be trusted
|
||||
5. Clearly tell the agent whether you expect it to create content, perform analysis, or just do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent
|
||||
6. If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.
|
||||
7. When only the general-purpose agent is provided, you should use it for all tasks. It is great for isolating context and token usage, and completing specific, complex tasks, as it has all the same capabilities as the main agent.
|
||||
|
||||
### 범용 에이전트 사용 예시:
|
||||
### Example usage of the general-purpose agent:
|
||||
|
||||
<example_agent_descriptions>
|
||||
"general-purpose": use this agent for general purpose tasks, it has access to all tools as the main agent.
|
||||
@@ -95,11 +95,11 @@ User: "I want to conduct research on the accomplishments of Lebron James, Michae
|
||||
Assistant: *Uses the task tool in parallel to conduct isolated research on each of the three players*
|
||||
Assistant: *Synthesizes the results of the three isolated research tasks and responds to the User*
|
||||
<commentary>
|
||||
연구는 그 자체로 복잡하고 다단계적인 작업입니다.
|
||||
각 개별 선수의 연구는 다른 선수의 연구에 의존하지 않습니다.
|
||||
어시스턴트는 task 도구를 사용하여 복잡한 목표를 세 가지 독립적인 작업으로 나눕니다.
|
||||
각 연구 작업은 한 선수에 대한 컨텍스트와 토큰만 신경 쓰면 되며, 도구 결과로 각 선수에 대한 종합된 정보를 반환합니다.
|
||||
이는 각 연구 작업이 각 선수를 깊이 있게 연구하는 데 토큰과 컨텍스트를 사용할 수 있음을 의미하며, 최종 결과는 종합된 정보이므로 선수들을 서로 비교할 때 장기적으로 토큰을 절약할 수 있습니다.
|
||||
Research is a complex, multi-step task in it of itself.
|
||||
The research of each individual player is not dependent on the research of the other players.
|
||||
The assistant uses the task tool to break down the complex objective into three isolated tasks.
|
||||
Each research task only needs to worry about context and tokens about one player, then returns synthesized information about each player as the Tool Result.
|
||||
This means each research task can dive deep and spend tokens and context deeply researching each player, but the final result is synthesized information, and saves us tokens in the long run when comparing the players to each other.
|
||||
</commentary>
|
||||
</example>
|
||||
|
||||
@@ -108,8 +108,8 @@ User: "Analyze a single large code repository for security vulnerabilities and g
|
||||
Assistant: *Launches a single `task` subagent for the repository analysis*
|
||||
Assistant: *Receives report and integrates results into final summary*
|
||||
<commentary>
|
||||
서브 에이전트는 단 하나라도 크고 컨텍스트가 많은 작업을 격리하는 데 사용됩니다. 이는 메인 스레드가 세부 사항으로 과부하되는 것을 방지합니다.
|
||||
사용자가 후속 질문을 하면 분석 및 도구 호출의 전체 기록 대신 참조할 간결한 보고서가 있으므로 시간과 비용을 절약할 수 있습니다.
|
||||
Subagent is used to isolate a large, context-heavy task, even though there is only one. This prevents the main thread from being overloaded with details.
|
||||
If the user then asks followup questions, we have a concise report to reference instead of the entire history of analysis and tool calls, which is good and saves us time and money.
|
||||
</commentary>
|
||||
</example>
|
||||
|
||||
@@ -118,8 +118,8 @@ User: "Schedule two meetings for me and prepare agendas for each."
|
||||
Assistant: *Calls the task tool in parallel to launch two `task` subagents (one per meeting) to prepare agendas*
|
||||
Assistant: *Returns final schedules and agendas*
|
||||
<commentary>
|
||||
작업은 개별적으로는 간단하지만, 서브 에이전트는 의제 준비를 격리하는 데 도움이 됩니다.
|
||||
각 서브 에이전트는 한 회의의 의제만 신경 쓰면 됩니다.
|
||||
Tasks are simple individually, but subagents help silo agenda preparation.
|
||||
Each subagent only needs to worry about the agenda for one meeting.
|
||||
</commentary>
|
||||
</example>
|
||||
|
||||
@@ -127,8 +127,8 @@ Assistant: *Returns final schedules and agendas*
|
||||
User: "I want to order a pizza from Dominos, order a burger from McDonald's, and order a salad from Subway."
|
||||
Assistant: *Calls tools directly in parallel to order a pizza from Dominos, a burger from McDonald's, and a salad from Subway*
|
||||
<commentary>
|
||||
목표가 매우 간단하고 명확하며 몇 가지 사소한 도구 호출만 필요하므로 어시스턴트는 task 도구를 사용하지 않았습니다.
|
||||
작업을 직접 완료하고 `task` 도구를 사용하지 않는 것이 더 좋습니다.
|
||||
The assistant did not use the task tool because the objective is super simple and clear and only requires a few trivial tool calls.
|
||||
It is better to just complete the task directly and NOT use the `task`tool.
|
||||
</commentary>
|
||||
</example>
|
||||
|
||||
@@ -155,7 +155,7 @@ function isPrime(n) {{
|
||||
}}
|
||||
</code>
|
||||
<commentary>
|
||||
상당한 콘텐츠가 생성되었고 작업이 완료되었으므로, 이제 content-reviewer 에이전트를 사용하여 작업을 검토합니다.
|
||||
Since significant content was created and the task was completed, now use the content-reviewer agent to review the work
|
||||
</commentary>
|
||||
assistant: Now let me use the content-reviewer agent to review the code
|
||||
assistant: Uses the Task tool to launch with the content-reviewer agent
|
||||
@@ -164,7 +164,7 @@ assistant: Uses the Task tool to launch with the content-reviewer agent
|
||||
<example>
|
||||
user: "Can you help me research the environmental impact of different renewable energy sources and create a comprehensive report?"
|
||||
<commentary>
|
||||
이것은 철저한 분석을 수행하기 위해 research-analyst 에이전트를 사용하는 것이 도움이 되는 복잡한 연구 작업입니다.
|
||||
This is a complex research task that would benefit from using the research-analyst agent to conduct thorough analysis
|
||||
</commentary>
|
||||
assistant: I'll help you research the environmental impact of renewable energy sources. Let me use the research-analyst agent to conduct comprehensive research on this topic.
|
||||
assistant: Uses the Task tool to launch with the research-analyst agent, providing detailed instructions about what research to conduct and what format the report should take
|
||||
@@ -173,41 +173,41 @@ assistant: Uses the Task tool to launch with the research-analyst agent, providi
|
||||
<example>
|
||||
user: "Hello"
|
||||
<commentary>
|
||||
사용자가 인사를 하고 있으므로, greeting-responder 에이전트를 사용하여 친절한 농담으로 응답하십시오.
|
||||
Since the user is greeting, use the greeting-responder agent to respond with a friendly joke
|
||||
</commentary>
|
||||
assistant: "I'm going to use the Task tool to launch with the greeting-responder agent"
|
||||
</example>""" # noqa: E501
|
||||
|
||||
TASK_SYSTEM_PROMPT = """## `task` (서브 에이전트 스포너(spawner))
|
||||
TASK_SYSTEM_PROMPT = """## `task` (subagent spawner)
|
||||
|
||||
당신은 격리된 작업을 처리하는 일회성 서브 에이전트를 실행하기 위한 `task` 도구에 접근할 수 있습니다. 이 에이전트들은 일회적(ephemeral)입니다 — 작업 기간 동안에만 존재하며 단일 결과를 반환합니다.
|
||||
You have access to a `task` tool to launch short-lived subagents that handle isolated tasks. These agents are ephemeral — they live only for the duration of the task and return a single result.
|
||||
|
||||
task 도구를 사용해야 하는 경우:
|
||||
- 작업이 복잡하고 다단계적이며 완전히 격리하여 위임할 수 있는 경우
|
||||
- 작업이 다른 작업과 독립적이며 병렬로 실행할 수 있는 경우
|
||||
- 작업에 집중적인 추론이나 많은 토큰/컨텍스트 사용이 필요하여 오케스트레이터 스레드를 부풀릴(bloat) 수 있는 경우
|
||||
- 샌드박싱이 신뢰성을 향상시키는 경우 (예: 코드 실행, 구조화된 검색, 데이터 포맷팅)
|
||||
- 서브 에이전트의 중간 단계가 아니라 출력에만 관심이 있는 경우 (예: 많은 연구를 수행한 후 종합된 보고서를 반환하거나, 간결하고 관련성 있는 답변을 얻기 위해 일련의 계산 또는 조회를 수행하는 경우)
|
||||
When to use the task tool:
|
||||
- When a task is complex and multi-step, and can be fully delegated in isolation
|
||||
- When a task is independent of other tasks and can run in parallel
|
||||
- When a task requires focused reasoning or heavy token/context usage that would bloat the orchestrator thread
|
||||
- When sandboxing improves reliability (e.g. code execution, structured searches, data formatting)
|
||||
- When you only care about the output of the subagent, and not the intermediate steps (ex. performing a lot of research and then returned a synthesized report, performing a series of computations or lookups to achieve a concise, relevant answer.)
|
||||
|
||||
서브 에이전트 생명주기:
|
||||
1. **생성(Spawn)** → 명확한 역할, 지침 및 예상 출력 제공
|
||||
2. **실행(Run)** → 서브 에이전트가 자율적으로 작업 완료
|
||||
3. **반환(Return)** → 서브 에이전트가 단일 구조화된 결과를 제공
|
||||
4. **조정(Reconcile)** → 결과를 메인 스레드에 통합하거나 합성
|
||||
Subagent lifecycle:
|
||||
1. **Spawn** → Provide clear role, instructions, and expected output
|
||||
2. **Run** → The subagent completes the task autonomously
|
||||
3. **Return** → The subagent provides a single structured result
|
||||
4. **Reconcile** → Incorporate or synthesize the result into the main thread
|
||||
|
||||
task 도구를 사용하지 말아야 하는 경우:
|
||||
- 서브 에이전트가 완료된 후 중간 추론이나 단계를 확인해야 하는 경우 (task 도구는 이를 숨깁니다)
|
||||
- 작업이 사소한 경우 (몇 번의 도구 호출 또는 간단한 조회)
|
||||
- 위임이 토큰 사용량, 복잡성 또는 컨텍스트 전환을 줄이지 않는 경우
|
||||
- 분할이 이점 없이 지연 시간만 추가하는 경우
|
||||
When NOT to use the task tool:
|
||||
- If you need to see the intermediate reasoning or steps after the subagent has completed (the task tool hides them)
|
||||
- If the task is trivial (a few tool calls or simple lookup)
|
||||
- If delegating does not reduce token usage, complexity, or context switching
|
||||
- If splitting would add latency without benefit
|
||||
|
||||
## 기억해야 할 중요한 Task 도구 사용 참고 사항
|
||||
- 가능하면 수행하는 작업을 병렬화하십시오. 이는 도구 호출(tool_calls)과 작업(tasks) 모두에 해당합니다. 완료해야 할 독립적인 단계가 있을 때마다 - 도구 호출을 하거나 작업을 병렬로 시작(kick off)하여 더 빠르게 완료하십시오. 이는 사용자에게 매우 중요한 시간을 절약해 줍니다.
|
||||
- 다중 파트 목표 내에서 독립적인 작업을 격리(silo)하려면 `task` 도구를 사용하는 것을 기억하십시오.
|
||||
- 여러 단계가 걸리고 에이전트가 완료해야 하는 다른 작업과 독립적인 복잡한 작업이 있을 때마다 `task` 도구를 사용해야 합니다. 이 에이전트들은 매우 유능하고 효율적입니다.""" # noqa: E501
|
||||
## Important Task Tool Usage Notes to Remember
|
||||
- Whenever possible, parallelize the work that you do. This is true for both tool_calls, and for tasks. Whenever you have independent steps to complete - make tool_calls, or kick off tasks (subagents) in parallel to accomplish them faster. This saves time for the user, which is incredibly important.
|
||||
- Remember to use the `task` tool to silo independent tasks within a multi-part objective.
|
||||
- You should use the `task` tool whenever you have a complex task that will take multiple steps, and is independent from other tasks that the agent needs to complete. These agents are highly competent and efficient.""" # noqa: E501
|
||||
|
||||
|
||||
DEFAULT_GENERAL_PURPOSE_DESCRIPTION = "복잡한 질문 연구, 파일 및 콘텐츠 검색, 다중 단계 작업 실행을 위한 범용 에이전트입니다. 키워드나 파일을 검색할 때 처음 몇 번의 시도로 올바른 일치 항목을 찾을 수 있을지 확신이 서지 않는다면, 이 에이전트를 사용하여 검색을 수행하십시오. 이 에이전트는 메인 에이전트와 동일한 모든 도구에 접근할 수 있습니다." # noqa: E501
|
||||
DEFAULT_GENERAL_PURPOSE_DESCRIPTION = "General-purpose agent for researching complex questions, searching for files and content, and executing multi-step tasks. When you are searching for a keyword or file and are not confident that you will find the right match in the first few tries use this agent to perform the search for you. This agent has access to all tools as the main agent." # noqa: E501
|
||||
|
||||
|
||||
def _get_subagents(
|
||||
@@ -219,20 +219,21 @@ def _get_subagents(
|
||||
subagents: list[SubAgent | CompiledSubAgent],
|
||||
general_purpose_agent: bool,
|
||||
) -> tuple[dict[str, Any], list[str]]:
|
||||
"""사양(specifications)에서 서브 에이전트 인스턴스를 생성합니다.
|
||||
"""Create subagent instances from specifications.
|
||||
|
||||
Args:
|
||||
default_model: 지정하지 않은 서브 에이전트를 위한 기본 모델.
|
||||
default_tools: 지정하지 않은 서브 에이전트를 위한 기본 도구.
|
||||
default_middleware: 모든 서브 에이전트에 적용할 미들웨어. `None`인 경우 기본 미들웨어가 적용되지 않습니다.
|
||||
default_interrupt_on: 기본 범용 서브 에이전트에 사용할 도구 설정입니다.
|
||||
이는 자체 도구 설정을 지정하지 않은 서브 에이전트에 대한 폴백(fallback)이기도 합니다.
|
||||
subagents: 에이전트 사양 또는 미리 컴파일된 에이전트 목록.
|
||||
general_purpose_agent: 범용 서브 에이전트 포함 여부.
|
||||
default_model: Default model for subagents that don't specify one.
|
||||
default_tools: Default tools for subagents that don't specify tools.
|
||||
default_middleware: Middleware to apply to all subagents. If `None`,
|
||||
no default middleware is applied.
|
||||
default_interrupt_on: The tool configs to use for the default general-purpose subagent. These
|
||||
are also the fallback for any subagents that don't specify their own tool configs.
|
||||
subagents: List of agent specifications or pre-compiled agents.
|
||||
general_purpose_agent: Whether to include a general-purpose subagent.
|
||||
|
||||
Returns:
|
||||
(agent_dict, description_list) 튜플. agent_dict는 에이전트 이름을 runnable 인스턴스에 매핑하고,
|
||||
description_list는 포맷된 설명을 포함합니다.
|
||||
Tuple of (agent_dict, description_list) where agent_dict maps agent names
|
||||
to runnable instances and description_list contains formatted descriptions.
|
||||
"""
|
||||
# Use empty list if None (no default middleware)
|
||||
default_subagent_middleware = default_middleware or []
|
||||
@@ -265,11 +266,7 @@ def _get_subagents(
|
||||
|
||||
subagent_model = agent_.get("model", default_model)
|
||||
|
||||
_middleware = (
|
||||
[*default_subagent_middleware, *agent_["middleware"]]
|
||||
if "middleware" in agent_
|
||||
else [*default_subagent_middleware]
|
||||
)
|
||||
_middleware = [*default_subagent_middleware, *agent_["middleware"]] if "middleware" in agent_ else [*default_subagent_middleware]
|
||||
|
||||
interrupt_on = agent_.get("interrupt_on", default_interrupt_on)
|
||||
if interrupt_on:
|
||||
@@ -294,21 +291,21 @@ def _create_task_tool(
|
||||
general_purpose_agent: bool,
|
||||
task_description: str | None = None,
|
||||
) -> BaseTool:
|
||||
"""서브 에이전트를 호출하기 위한 task 도구를 생성합니다.
|
||||
"""Create a task tool for invoking subagents.
|
||||
|
||||
Args:
|
||||
default_model: 서브 에이전트용 기본 모델.
|
||||
default_tools: 서브 에이전트용 기본 도구.
|
||||
default_middleware: 모든 서브 에이전트에 적용할 미들웨어.
|
||||
default_interrupt_on: 기본 범용 서브 에이전트에 사용할 도구 설정입니다.
|
||||
이는 자체 도구 설정을 지정하지 않은 서브 에이전트에 대한 폴백이기도 합니다.
|
||||
subagents: 서브 에이전트 사양 목록.
|
||||
general_purpose_agent: 범용 에이전트 포함 여부.
|
||||
task_description: task 도구에 대한 사용자 정의 설명. `None`인 경우
|
||||
기본 템플릿을 사용합니다. `{available_agents}` 플레이스홀더를 지원합니다.
|
||||
default_model: Default model for subagents.
|
||||
default_tools: Default tools for subagents.
|
||||
default_middleware: Middleware to apply to all subagents.
|
||||
default_interrupt_on: The tool configs to use for the default general-purpose subagent. These
|
||||
are also the fallback for any subagents that don't specify their own tool configs.
|
||||
subagents: List of subagent specifications.
|
||||
general_purpose_agent: Whether to include general-purpose agent.
|
||||
task_description: Custom description for the task tool. If `None`,
|
||||
uses default template. Supports `{available_agents}` placeholder.
|
||||
|
||||
Returns:
|
||||
유형별로 서브 에이전트를 호출할 수 있는 StructuredTool.
|
||||
A StructuredTool that can invoke subagents by type.
|
||||
"""
|
||||
subagent_graphs, subagent_descriptions = _get_subagents(
|
||||
default_model=default_model,
|
||||
@@ -331,9 +328,7 @@ def _create_task_tool(
|
||||
}
|
||||
)
|
||||
|
||||
def _validate_and_prepare_state(
|
||||
subagent_type: str, description: str, runtime: ToolRuntime
|
||||
) -> tuple[Runnable, dict]:
|
||||
def _validate_and_prepare_state(subagent_type: str, description: str, runtime: ToolRuntime) -> tuple[Runnable, dict]:
|
||||
"""Prepare state for invocation."""
|
||||
subagent = subagent_graphs[subagent_type]
|
||||
# Create a new state dict to avoid mutating the original
|
||||
@@ -355,11 +350,11 @@ def _create_task_tool(
|
||||
) -> str | Command:
|
||||
if subagent_type not in subagent_graphs:
|
||||
allowed_types = ", ".join([f"`{k}`" for k in subagent_graphs])
|
||||
return f"{subagent_type} 서브 에이전트는 존재하지 않으므로 호출할 수 없습니다. 허용된 유형은 다음과 같습니다: {allowed_types}"
|
||||
return f"We cannot invoke subagent {subagent_type} because it does not exist, the only allowed types are {allowed_types}"
|
||||
subagent, subagent_state = _validate_and_prepare_state(subagent_type, description, runtime)
|
||||
result = subagent.invoke(subagent_state, runtime.config)
|
||||
if not runtime.tool_call_id:
|
||||
value_error_msg = "서브 에이전트 호출에는 도구 호출 ID가 필요합니다"
|
||||
value_error_msg = "Tool call ID is required for subagent invocation"
|
||||
raise ValueError(value_error_msg)
|
||||
return _return_command_with_state_update(result, runtime.tool_call_id)
|
||||
|
||||
@@ -370,11 +365,11 @@ def _create_task_tool(
|
||||
) -> str | Command:
|
||||
if subagent_type not in subagent_graphs:
|
||||
allowed_types = ", ".join([f"`{k}`" for k in subagent_graphs])
|
||||
return f"{subagent_type} 서브 에이전트는 존재하지 않으므로 호출할 수 없습니다. 허용된 유형은 다음과 같습니다: {allowed_types}"
|
||||
return f"We cannot invoke subagent {subagent_type} because it does not exist, the only allowed types are {allowed_types}"
|
||||
subagent, subagent_state = _validate_and_prepare_state(subagent_type, description, runtime)
|
||||
result = await subagent.ainvoke(subagent_state, runtime.config)
|
||||
if not runtime.tool_call_id:
|
||||
value_error_msg = "서브 에이전트 호출에는 도구 호출 ID가 필요합니다"
|
||||
value_error_msg = "Tool call ID is required for subagent invocation"
|
||||
raise ValueError(value_error_msg)
|
||||
return _return_command_with_state_update(result, runtime.tool_call_id)
|
||||
|
||||
@@ -387,33 +382,35 @@ def _create_task_tool(
|
||||
|
||||
|
||||
class SubAgentMiddleware(AgentMiddleware):
|
||||
"""`task` 도구를 통해 에이전트에게 서브 에이전트를 제공하기 위한 미들웨어.
|
||||
"""Middleware for providing subagents to an agent via a `task` tool.
|
||||
|
||||
이 미들웨어는 서브 에이전트를 호출하는 데 사용할 수 있는 `task` 도구를 에이전트에 추가합니다.
|
||||
서브 에이전트는 여러 단계가 필요한 복잡한 작업이나 해결하기 위해 많은 컨텍스트가 필요한 작업을 처리하는 데 유용합니다.
|
||||
This middleware adds a `task` tool to the agent that can be used to invoke subagents.
|
||||
Subagents are useful for handling complex tasks that require multiple steps, or tasks
|
||||
that require a lot of context to resolve.
|
||||
|
||||
서브 에이전트의 주된 이점은 다중 단계 작업을 처리한 다음,
|
||||
깨끗하고 간결한 응답을 메인 에이전트에게 반환할 수 있다는 것입니다.
|
||||
A chief benefit of subagents is that they can handle multi-step tasks, and then return
|
||||
a clean, concise response to the main agent.
|
||||
|
||||
서브 에이전트는 좁은 도구 집합과 집중이 필요한 다양한 전문 분야에도 적합합니다.
|
||||
Subagents are also great for different domains of expertise that require a narrower
|
||||
subset of tools and focus.
|
||||
|
||||
이 미들웨어에는 격리된 컨텍스트에서 메인 에이전트와 동일한 작업을 처리하는 데 사용할 수 있는
|
||||
기본 범용 서브 에이전트가 함께 제공됩니다.
|
||||
This middleware comes with a default general-purpose subagent that can be used to
|
||||
handle the same tasks as the main agent, but with isolated context.
|
||||
|
||||
Args:
|
||||
default_model: 서브 에이전트에 사용할 모델.
|
||||
LanguageModelLike 또는 init_chat_model을 위한 dict일 수 있습니다.
|
||||
default_tools: 기본 범용 서브 에이전트에 사용할 도구.
|
||||
default_middleware: 모든 서브 에이전트에 적용할 기본 미들웨어. `None`(기본값)인 경우
|
||||
기본 미들웨어가 적용되지 않습니다. 사용자 정의 미들웨어를 지정하려면 목록을 전달하십시오.
|
||||
default_interrupt_on: 기본 범용 서브 에이전트에 사용할 도구 설정입니다.
|
||||
이는 자체 도구 설정을 지정하지 않은 서브 에이전트에 대한 폴백이기도 합니다.
|
||||
subagents: 에이전트에 제공할 추가 서브 에이전트 목록.
|
||||
system_prompt: 전체 시스템 프롬프트 재정의. 제공된 경우 에이전트의
|
||||
시스템 프롬프트를 완전히 대체합니다.
|
||||
general_purpose_agent: 범용 에이전트 포함 여부. 기본값은 `True`입니다.
|
||||
task_description: task 도구에 대한 사용자 정의 설명. `None`인 경우
|
||||
기본 설명 템플릿을 사용합니다.
|
||||
default_model: The model to use for subagents.
|
||||
Can be a LanguageModelLike or a dict for init_chat_model.
|
||||
default_tools: The tools to use for the default general-purpose subagent.
|
||||
default_middleware: Default middleware to apply to all subagents. If `None` (default),
|
||||
no default middleware is applied. Pass a list to specify custom middleware.
|
||||
default_interrupt_on: The tool configs to use for the default general-purpose subagent. These
|
||||
are also the fallback for any subagents that don't specify their own tool configs.
|
||||
subagents: A list of additional subagents to provide to the agent.
|
||||
system_prompt: Full system prompt override. When provided, completely replaces
|
||||
the agent's system prompt.
|
||||
general_purpose_agent: Whether to include the general-purpose agent. Defaults to `True`.
|
||||
task_description: Custom description for the task tool. If `None`, uses the
|
||||
default description template.
|
||||
|
||||
Example:
|
||||
```python
|
||||
@@ -457,7 +454,7 @@ class SubAgentMiddleware(AgentMiddleware):
|
||||
general_purpose_agent: bool = True,
|
||||
task_description: str | None = None,
|
||||
) -> None:
|
||||
"""SubAgentMiddleware를 초기화합니다."""
|
||||
"""Initialize the SubAgentMiddleware."""
|
||||
super().__init__()
|
||||
self.system_prompt = system_prompt
|
||||
task_tool = _create_task_tool(
|
||||
@@ -476,11 +473,9 @@ class SubAgentMiddleware(AgentMiddleware):
|
||||
request: ModelRequest,
|
||||
handler: Callable[[ModelRequest], ModelResponse],
|
||||
) -> ModelResponse:
|
||||
"""시스템 프롬프트를 업데이트하여 서브 에이전트 사용 지침을 포함합니다."""
|
||||
"""Update the system prompt to include instructions on using subagents."""
|
||||
if self.system_prompt is not None:
|
||||
system_prompt = (
|
||||
request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
|
||||
)
|
||||
system_prompt = request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
|
||||
return handler(request.override(system_prompt=system_prompt))
|
||||
return handler(request)
|
||||
|
||||
@@ -489,10 +484,8 @@ class SubAgentMiddleware(AgentMiddleware):
|
||||
request: ModelRequest,
|
||||
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
|
||||
) -> ModelResponse:
|
||||
"""(async) 시스템 프롬프트를 업데이트하여 서브 에이전트 사용 지침을 포함합니다."""
|
||||
"""(async) Update the system prompt to include instructions on using subagents."""
|
||||
if self.system_prompt is not None:
|
||||
system_prompt = (
|
||||
request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
|
||||
)
|
||||
system_prompt = request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
|
||||
return await handler(request.override(system_prompt=system_prompt))
|
||||
return await handler(request)
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
[project]
|
||||
name = "deepagents"
|
||||
version = "0.3.1"
|
||||
version = "0.3.5"
|
||||
description = "General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph."
|
||||
readme = "README.md"
|
||||
license = { text = "MIT" }
|
||||
requires-python = ">=3.11,<4.0"
|
||||
dependencies = [
|
||||
"langchain-anthropic>=1.2.0,<2.0.0",
|
||||
"langchain-google-genai",
|
||||
"langchain>=1.1.0,<2.0.0",
|
||||
"langchain-core>=1.1.0,<2.0.0",
|
||||
"langchain-core>=1.2.6,<2.0.0",
|
||||
"langchain>=1.2.3,<2.0.0",
|
||||
"langchain-anthropic>=1.3.1,<2.0.0",
|
||||
"langchain-google-genai>=4.1.3,<5.0.0",
|
||||
"wcmatch",
|
||||
]
|
||||
|
||||
@@ -88,6 +88,41 @@ ignore-var-parameters = true
|
||||
# Add more test-specific ignores
|
||||
]
|
||||
|
||||
"deepagents/backends/composite.py" = ["B007", "BLE001", "D102", "EM101", "FBT001", "FBT002", "PLW2901", "S110"]
|
||||
"deepagents/backends/filesystem.py" = ["BLE001", "D102", "D205", "D417", "DTZ006", "EM101", "EM102", "FBT001", "FBT002", "PLR0912", "S112", "TRY003"]
|
||||
"deepagents/backends/protocol.py" = ["B024", "B027", "FBT001", "FBT002"]
|
||||
"deepagents/backends/sandbox.py" = ["FBT001", "FBT002", "PLR2004"]
|
||||
"deepagents/backends/state.py" = ["ANN204", "D102", "D205", "EM101", "FBT001", "FBT002", "PERF401"]
|
||||
"deepagents/backends/store.py" = ["A002", "ANN204", "BLE001", "D102", "D205", "F821", "FBT001", "FBT002", "PERF401"]
|
||||
"deepagents/backends/utils.py" = ["D301", "E501", "EM101", "FBT001", "RET504", "RUF005", "TRY003"]
|
||||
"deepagents/middleware/filesystem.py" = ["EM102", "TRY003"]
|
||||
"deepagents/middleware/memory.py" = ["E501", "EM102", "G004", "PERF401", "SIM108", "T201", "TC002", "TC003", "TRY003"]
|
||||
"deepagents/middleware/skills.py" = ["EM101", "SIM108", "TC002", "TC003", "TRY003"]
|
||||
"tests/integration_tests/test_deepagents.py" = ["ANN201", "C419", "E731", "PLR2004", "SIM118", "TID252"]
|
||||
"tests/integration_tests/test_filesystem_middleware.py" = ["ANN001", "ANN201", "ANN202", "ARG002", "E731", "PLR2004", "SIM118", "T201", "TID252"]
|
||||
"tests/integration_tests/test_hitl.py" = ["ANN201", "C419", "E501", "PLR2004", "TID252"]
|
||||
"tests/integration_tests/test_subagent_middleware.py" = ["ANN001", "ANN201", "F841", "RUF012", "SIM118"]
|
||||
"tests/unit_tests/backends/test_composite_backend.py" = ["ANN001", "ANN201", "ANN202", "ARG001", "ARG002", "F841", "INP001", "PLR2004", "PT018"]
|
||||
"tests/unit_tests/backends/test_composite_backend_async.py" = ["ANN001", "ANN201", "ANN202", "ARG001", "ARG002", "F841", "INP001", "PLR2004", "PT018"]
|
||||
"tests/unit_tests/backends/test_filesystem_backend.py" = ["ANN201", "ARG005", "B007", "B011", "INP001", "PLR2004", "PT015", "PT018"]
|
||||
"tests/unit_tests/backends/test_filesystem_backend_async.py" = ["ANN201", "ARG005", "B007", "INP001", "PLR2004", "PT011", "PT018"]
|
||||
"tests/unit_tests/backends/test_state_backend.py" = ["ANN001", "ANN201", "INP001", "PLR2004", "PT018"]
|
||||
"tests/unit_tests/backends/test_state_backend_async.py" = ["ANN001", "ANN201", "INP001", "PLR2004", "PT018"]
|
||||
"tests/unit_tests/backends/test_store_backend.py" = ["ANN201", "INP001", "PLR2004", "PT018"]
|
||||
"tests/unit_tests/backends/test_store_backend_async.py" = ["ANN201", "INP001", "PLR2004", "PT018"]
|
||||
"tests/unit_tests/chat_model.py" = ["ARG002", "D301", "PLR0912", "RUF012"]
|
||||
"tests/unit_tests/middleware/test_memory_middleware.py" = ["F841", "PGH003", "PLR2004", "RUF001", "TC002"]
|
||||
"tests/unit_tests/middleware/test_memory_middleware_async.py" = ["F841", "PGH003", "PLR2004", "RUF001"]
|
||||
"tests/unit_tests/middleware/test_skills_middleware.py" = ["F841", "PGH003", "PLR2004", "TC002"]
|
||||
"tests/unit_tests/middleware/test_skills_middleware_async.py" = ["F841", "PGH003", "PLR2004"]
|
||||
"tests/unit_tests/middleware/test_validate_path.py" = ["ANN201"]
|
||||
"tests/unit_tests/test_end_to_end.py" = ["ARG002", "PLR2004"]
|
||||
"tests/unit_tests/test_middleware.py" = ["ANN001", "ANN201", "ANN202", "ARG002", "E731", "PLR2004", "SIM118", "T201"]
|
||||
"tests/unit_tests/test_middleware_async.py" = ["ANN001", "ANN201", "ANN202", "ARG002"]
|
||||
"tests/unit_tests/test_subagents.py" = ["PLR2004"]
|
||||
"tests/unit_tests/test_todo_middleware.py" = ["E501", "PLR2004"]
|
||||
"tests/utils.py" = ["ANN001", "ANN201", "RUF012", "SIM118"]
|
||||
|
||||
[tool.mypy]
|
||||
strict = true
|
||||
ignore_missing_imports = true
|
||||
|
||||
@@ -1,8 +1,12 @@
|
||||
from datetime import UTC, datetime
|
||||
|
||||
from langchain.agents import create_agent
|
||||
from langchain.agents.structured_output import ToolStrategy
|
||||
from langchain_core.messages import HumanMessage
|
||||
from langgraph.store.memory import InMemoryStore
|
||||
from pydantic import BaseModel
|
||||
|
||||
from deepagents.backends import CompositeBackend, StateBackend, StoreBackend
|
||||
from deepagents.graph import create_deep_agent
|
||||
|
||||
from ..utils import (
|
||||
@@ -163,3 +167,30 @@ class TestDeepAgents:
|
||||
response = agent.invoke({"messages": [{"role": "user", "content": "Who are all of the Kanto starters?"}]})
|
||||
structured_output = response["structured_response"]
|
||||
assert len(structured_output.pokemon) == 3
|
||||
|
||||
async def test_with_memory_middleware(self):
|
||||
store = InMemoryStore()
|
||||
now = datetime.now(UTC).isoformat()
|
||||
store.put(
|
||||
("filesystem",),
|
||||
"/AGENTS.md",
|
||||
{
|
||||
"content": ["Your name is Jackson"],
|
||||
"created_at": now,
|
||||
"modified_at": now,
|
||||
},
|
||||
)
|
||||
sample_backend = lambda rt: CompositeBackend(
|
||||
default=StateBackend(rt),
|
||||
routes={
|
||||
"/memories/": StoreBackend(rt),
|
||||
},
|
||||
)
|
||||
agent = create_deep_agent(
|
||||
backend=sample_backend,
|
||||
memory=["/memories/AGENTS.md"],
|
||||
store=store,
|
||||
)
|
||||
assert_all_deepagent_qualities(agent)
|
||||
result = await agent.ainvoke({"messages": [HumanMessage(content="What is your name?")]})
|
||||
assert "Jackson" in result["messages"][-1].content
|
||||
|
||||
@@ -206,8 +206,38 @@ def test_composite_backend_multiple_routes():
|
||||
assert "persistent memory" in updated_content
|
||||
|
||||
|
||||
def test_composite_backend_ls_nested_directories(tmp_path: Path):
|
||||
def test_composite_backend_grep_path_isolation():
|
||||
"""Test that grep with path=/tools doesn't return results from /memories."""
|
||||
rt = make_runtime("t7")
|
||||
|
||||
# Use StateBackend as default, StoreBackend for /memories/
|
||||
state = StateBackend(rt)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=state, routes={"/memories/": store})
|
||||
|
||||
# Write to state backend (default) in /tools directory
|
||||
comp.write("/tools/hammer.txt", "tool for nailing")
|
||||
comp.write("/tools/saw.txt", "tool for cutting")
|
||||
|
||||
# Write to memories route with content that would match our grep
|
||||
comp.write("/memories/workshop.txt", "tool shed location")
|
||||
comp.write("/memories/notes.txt", "remember to buy tools")
|
||||
|
||||
# Grep for "tool" in /tools directory - should NOT return /memories results
|
||||
matches = comp.grep_raw("tool", path="/tools")
|
||||
match_paths = [m["path"] for m in matches] if isinstance(matches, list) else []
|
||||
|
||||
# Should find results in /tools
|
||||
assert any("/tools/hammer.txt" in p for p in match_paths)
|
||||
assert any("/tools/saw.txt" in p for p in match_paths)
|
||||
|
||||
# Should NOT find results in /memories (this is the bug)
|
||||
assert not any("/memories/" in p for p in match_paths), f"grep path=/tools should not return /memories results, but got: {match_paths}"
|
||||
|
||||
|
||||
def test_composite_backend_ls_nested_directories(tmp_path: Path):
|
||||
rt = make_runtime("t8")
|
||||
root = tmp_path
|
||||
|
||||
files = {
|
||||
@@ -669,3 +699,417 @@ def test_composite_download_preserves_original_paths(tmp_path: Path):
|
||||
# Response should have the original composite path, not stripped
|
||||
assert responses[0].path == "/subdir/file.bin"
|
||||
assert responses[0].content == b"Nested file"
|
||||
|
||||
|
||||
def test_composite_grep_targeting_specific_route(tmp_path: Path) -> None:
|
||||
"""Test grep with path targeting a specific routed backend."""
|
||||
rt = make_runtime("t_grep1")
|
||||
root = tmp_path
|
||||
|
||||
# Setup filesystem backend with some files
|
||||
(root / "default.txt").write_text("default backend content")
|
||||
(root / "default2.txt").write_text("more default stuff")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
# Write to memories route
|
||||
comp.write("/memories/note1.txt", "memory content alpha")
|
||||
comp.write("/memories/note2.txt", "memory content beta")
|
||||
|
||||
# Grep with path="/memories/" should only search memories backend
|
||||
matches = comp.grep_raw("memory", path="/memories/")
|
||||
assert isinstance(matches, list)
|
||||
match_paths = [m["path"] for m in matches]
|
||||
|
||||
# Should find matches in /memories/
|
||||
assert any("/memories/note1.txt" in p for p in match_paths)
|
||||
assert any("/memories/note2.txt" in p for p in match_paths)
|
||||
|
||||
# Should NOT find matches in default backend
|
||||
assert not any("/default" in p for p in match_paths)
|
||||
|
||||
|
||||
def test_composite_grep_with_glob_filter(tmp_path: Path) -> None:
|
||||
"""Test grep with glob parameter to filter files."""
|
||||
rt = make_runtime("t_grep2")
|
||||
root = tmp_path
|
||||
|
||||
# Create files with different extensions
|
||||
(root / "script.py").write_text("python code here")
|
||||
(root / "config.json").write_text("json config here")
|
||||
(root / "readme.md").write_text("markdown docs here")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
# Add some files to memories route
|
||||
comp.write("/memories/notes.py", "python notes here")
|
||||
comp.write("/memories/data.json", "json data here")
|
||||
|
||||
# Grep with glob="*.py" should only search Python files
|
||||
matches = comp.grep_raw("here", path="/", glob="*.py")
|
||||
assert isinstance(matches, list)
|
||||
match_paths = [m["path"] for m in matches]
|
||||
|
||||
# Should find .py files
|
||||
assert any("/script.py" in p for p in match_paths)
|
||||
assert any("/memories/notes.py" in p for p in match_paths)
|
||||
|
||||
# Should NOT find non-.py files
|
||||
assert not any(".json" in p for p in match_paths)
|
||||
assert not any(".md" in p for p in match_paths)
|
||||
|
||||
|
||||
def test_composite_grep_with_glob_in_specific_route(tmp_path: Path) -> None:
|
||||
"""Test grep with glob parameter targeting a specific route."""
|
||||
rt = make_runtime("t_grep3")
|
||||
root = tmp_path
|
||||
|
||||
(root / "local.md").write_text("local markdown")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
# Add files to memories
|
||||
comp.write("/memories/important.md", "important notes")
|
||||
comp.write("/memories/data.txt", "text data")
|
||||
|
||||
# Grep memories with glob="*.md"
|
||||
matches = comp.grep_raw("notes", path="/memories/", glob="*.md")
|
||||
assert isinstance(matches, list)
|
||||
match_paths = [m["path"] for m in matches]
|
||||
|
||||
# Should find .md file in memories
|
||||
assert any("/memories/important.md" in p for p in match_paths)
|
||||
|
||||
# Should NOT find .txt files or default backend files
|
||||
assert not any("/memories/data.txt" in p for p in match_paths)
|
||||
assert not any("/local.md" in p for p in match_paths)
|
||||
|
||||
|
||||
def test_composite_grep_with_path_none(tmp_path: Path) -> None:
|
||||
"""Test grep with path=None behaves like path='/'."""
|
||||
rt = make_runtime("t_grep4")
|
||||
root = tmp_path
|
||||
|
||||
(root / "file1.txt").write_text("searchable content")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
comp.write("/memories/file2.txt", "searchable memory")
|
||||
|
||||
# Grep with path=None
|
||||
matches_none = comp.grep_raw("searchable", path=None)
|
||||
assert isinstance(matches_none, list)
|
||||
|
||||
# Grep with path="/"
|
||||
matches_root = comp.grep_raw("searchable", path="/")
|
||||
assert isinstance(matches_root, list)
|
||||
|
||||
# Both should return same results
|
||||
paths_none = sorted([m["path"] for m in matches_none])
|
||||
paths_root = sorted([m["path"] for m in matches_root])
|
||||
|
||||
assert paths_none == paths_root
|
||||
assert len(paths_none) == 2
|
||||
|
||||
|
||||
def test_composite_grep_invalid_regex(tmp_path: Path) -> None:
|
||||
"""Test grep with invalid regex pattern returns error string."""
|
||||
rt = make_runtime("t_grep5")
|
||||
root = tmp_path
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
comp = CompositeBackend(default=fs, routes={})
|
||||
|
||||
# Invalid regex patterns
|
||||
result = comp.grep_raw("[invalid(", path="/")
|
||||
assert isinstance(result, str)
|
||||
assert "Invalid regex" in result or "error" in result.lower()
|
||||
|
||||
|
||||
def test_composite_grep_nested_path_in_route(tmp_path: Path) -> None:
|
||||
"""Test grep with nested path within a routed backend."""
|
||||
rt = make_runtime("t_grep6")
|
||||
root = tmp_path
|
||||
|
||||
(root / "local.txt").write_text("local content")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
# Create nested structure in memories
|
||||
comp.write("/memories/docs/readme.md", "documentation here")
|
||||
comp.write("/memories/docs/guide.md", "guide here")
|
||||
comp.write("/memories/notes.txt", "notes here")
|
||||
|
||||
# Grep with nested path
|
||||
matches = comp.grep_raw("here", path="/memories/docs/")
|
||||
assert isinstance(matches, list)
|
||||
match_paths = [m["path"] for m in matches]
|
||||
|
||||
# Should find files in /memories/docs/
|
||||
assert any("/memories/docs/readme.md" in p for p in match_paths)
|
||||
assert any("/memories/docs/guide.md" in p for p in match_paths)
|
||||
|
||||
# Should NOT find files outside /memories/docs/
|
||||
assert not any("/memories/notes.txt" in p for p in match_paths)
|
||||
assert not any("/local.txt" in p for p in match_paths)
|
||||
|
||||
|
||||
def test_composite_grep_empty_results(tmp_path: Path) -> None:
|
||||
"""Test grep that matches nothing returns empty list."""
|
||||
rt = make_runtime("t_grep7")
|
||||
root = tmp_path
|
||||
|
||||
(root / "file.txt").write_text("some content")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
comp.write("/memories/note.txt", "memory content")
|
||||
|
||||
# Search for pattern that doesn't exist
|
||||
matches = comp.grep_raw("nonexistent_pattern_xyz", path="/")
|
||||
assert isinstance(matches, list)
|
||||
assert len(matches) == 0
|
||||
|
||||
|
||||
def test_composite_grep_route_prefix_restoration(tmp_path: Path) -> None:
|
||||
"""Test that grep correctly restores route prefixes in results."""
|
||||
rt = make_runtime("t_grep8")
|
||||
root = tmp_path
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
# Write files to memories
|
||||
comp.write("/memories/alpha.txt", "test content alpha")
|
||||
comp.write("/memories/beta.txt", "test content beta")
|
||||
|
||||
# Grep in memories route
|
||||
matches = comp.grep_raw("test", path="/memories/")
|
||||
assert isinstance(matches, list)
|
||||
assert len(matches) > 0
|
||||
|
||||
# All paths should start with /memories/
|
||||
for match in matches:
|
||||
assert match["path"].startswith("/memories/")
|
||||
assert not match["path"].startswith("/memories//") # No double slashes
|
||||
|
||||
# Grep across all backends (path="/")
|
||||
matches_all = comp.grep_raw("test", path="/")
|
||||
assert isinstance(matches_all, list)
|
||||
|
||||
# Filter matches from memories
|
||||
memory_matches = [m for m in matches_all if "/memories/" in m["path"]]
|
||||
for match in memory_matches:
|
||||
assert match["path"].startswith("/memories/")
|
||||
|
||||
|
||||
def test_composite_grep_multiple_matches_per_file(tmp_path: Path) -> None:
|
||||
"""Test grep returns multiple matches from same file."""
|
||||
rt = make_runtime("t_grep9")
|
||||
root = tmp_path
|
||||
|
||||
# File with multiple matching lines
|
||||
(root / "multi.txt").write_text("line1 pattern\nline2 pattern\nline3 other")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
comp = CompositeBackend(default=fs, routes={})
|
||||
|
||||
matches = comp.grep_raw("pattern", path="/")
|
||||
assert isinstance(matches, list)
|
||||
|
||||
# Should have 2 matches from the same file
|
||||
multi_matches = [m for m in matches if "multi.txt" in m["path"]]
|
||||
assert len(multi_matches) == 2
|
||||
|
||||
# Verify line numbers are correct
|
||||
line_numbers = sorted([m["line"] for m in multi_matches])
|
||||
assert line_numbers == [1, 2]
|
||||
|
||||
|
||||
@pytest.mark.xfail(
|
||||
reason="StoreBackend instances share the same underlying store when using the same runtime, "
|
||||
"causing files written to one route to appear in all routes that use the same backend instance. "
|
||||
"This violates the expected isolation between routes."
|
||||
)
|
||||
def test_composite_grep_multiple_routes_aggregation(tmp_path: Path) -> None:
|
||||
"""Test grep aggregates results from multiple routed backends with expected isolation.
|
||||
|
||||
This test represents the intuitive expected behavior: files written to /memories/
|
||||
should only appear in /memories/, and files written to /archive/ should only appear
|
||||
in /archive/.
|
||||
"""
|
||||
rt = make_runtime("t_grep10")
|
||||
root = tmp_path
|
||||
|
||||
(root / "default.txt").write_text("default findme")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store1 = StoreBackend(rt)
|
||||
store2 = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store1, "/archive/": store2})
|
||||
|
||||
# Write to each route
|
||||
comp.write("/memories/mem.txt", "memory findme")
|
||||
comp.write("/archive/arch.txt", "archive findme")
|
||||
|
||||
# Grep across all backends
|
||||
matches = comp.grep_raw("findme", path="/")
|
||||
assert isinstance(matches, list)
|
||||
match_paths = sorted([m["path"] for m in matches])
|
||||
|
||||
# Expected: each file appears only in its own route
|
||||
expected_paths = sorted(
|
||||
[
|
||||
"/archive/arch.txt",
|
||||
"/default.txt",
|
||||
"/memories/mem.txt",
|
||||
]
|
||||
)
|
||||
assert match_paths == expected_paths
|
||||
|
||||
|
||||
def test_composite_grep_error_in_routed_backend() -> None:
|
||||
"""Test grep error handling when routed backend returns error string."""
|
||||
rt = make_runtime("t_grep_err1")
|
||||
|
||||
# Create a mock backend that returns error strings for grep
|
||||
class ErrorBackend(StoreBackend):
|
||||
def grep_raw(self, pattern: str, path: str | None = None, glob: str | None = None):
|
||||
return "Invalid regex pattern error"
|
||||
|
||||
error_backend = ErrorBackend(rt)
|
||||
state_backend = StateBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=state_backend, routes={"/errors/": error_backend})
|
||||
|
||||
# When searching a specific route that errors, return the error
|
||||
result = comp.grep_raw("test", path="/errors/")
|
||||
assert result == "Invalid regex pattern error"
|
||||
|
||||
|
||||
def test_composite_grep_error_in_routed_backend_at_root() -> None:
|
||||
"""Test grep error handling when routed backend errors during root search."""
|
||||
rt = make_runtime("t_grep_err2")
|
||||
|
||||
# Create a mock backend that returns error strings for grep
|
||||
class ErrorBackend(StoreBackend):
|
||||
def grep_raw(self, pattern: str, path: str | None = None, glob: str | None = None):
|
||||
return "Backend error occurred"
|
||||
|
||||
error_backend = ErrorBackend(rt)
|
||||
state_backend = StateBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=state_backend, routes={"/errors/": error_backend})
|
||||
|
||||
# When searching from root and a routed backend errors, return the error
|
||||
result = comp.grep_raw("test", path="/")
|
||||
assert result == "Backend error occurred"
|
||||
|
||||
|
||||
def test_composite_grep_error_in_default_backend_at_root() -> None:
|
||||
"""Test grep error handling when default backend errors during root search."""
|
||||
rt = make_runtime("t_grep_err3")
|
||||
|
||||
# Create a mock backend that returns error strings for grep
|
||||
class ErrorDefaultBackend(StateBackend):
|
||||
def grep_raw(self, pattern: str, path: str | None = None, glob: str | None = None):
|
||||
return "Default backend error"
|
||||
|
||||
error_default = ErrorDefaultBackend(rt)
|
||||
store_backend = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=error_default, routes={"/store/": store_backend})
|
||||
|
||||
# When searching from root and default backend errors, return the error
|
||||
result = comp.grep_raw("test", path="/")
|
||||
assert result == "Default backend error"
|
||||
|
||||
|
||||
def test_composite_grep_non_root_path_on_default_backend(tmp_path: Path) -> None:
|
||||
"""Test grep with non-root path on default backend."""
|
||||
rt = make_runtime("t_grep_default")
|
||||
root = tmp_path
|
||||
|
||||
# Create nested structure
|
||||
(root / "work").mkdir()
|
||||
(root / "work" / "project.txt").write_text("project content")
|
||||
(root / "other.txt").write_text("other content")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
# Search in /work directory (doesn't match any route)
|
||||
matches = comp.grep_raw("content", path="/work")
|
||||
match_paths = [m["path"] for m in matches]
|
||||
|
||||
# Should only find files in /work, not /other.txt
|
||||
assert match_paths == ["/work/project.txt"]
|
||||
|
||||
|
||||
def test_composite_glob_info_targeting_specific_route() -> None:
|
||||
"""Test glob_info when path matches a specific route."""
|
||||
rt = make_runtime("t_glob1")
|
||||
|
||||
store = StoreBackend(rt)
|
||||
state_backend = StateBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=state_backend, routes={"/memories/": store})
|
||||
|
||||
# Write files to memories
|
||||
comp.write("/memories/test.py", "python file")
|
||||
comp.write("/memories/data.json", "json file")
|
||||
comp.write("/memories/docs/readme.md", "markdown file")
|
||||
|
||||
# Write to default backend
|
||||
state_backend.write("/local.py", "local python")
|
||||
|
||||
# Glob in specific route with pattern - should only find .py files in memories
|
||||
results = comp.glob_info("**/*.py", path="/memories/")
|
||||
result_paths = [fi["path"] for fi in results]
|
||||
|
||||
assert result_paths == ["/memories/test.py"]
|
||||
|
||||
|
||||
def test_composite_glob_info_nested_path_in_route() -> None:
|
||||
"""Test glob_info with nested path within route."""
|
||||
rt = make_runtime("t_glob2")
|
||||
|
||||
store = StoreBackend(rt)
|
||||
state_backend = StateBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=state_backend, routes={"/archive/": store})
|
||||
|
||||
# Write nested files
|
||||
comp.write("/archive/2024/jan.log", "january logs")
|
||||
comp.write("/archive/2024/feb.log", "february logs")
|
||||
comp.write("/archive/2023/dec.log", "december logs")
|
||||
comp.write("/archive/notes.txt", "general notes")
|
||||
|
||||
# Glob in nested path within route - should only find .log files in /archive/2024/
|
||||
results = comp.glob_info("*.log", path="/archive/2024/")
|
||||
result_paths = sorted([fi["path"] for fi in results])
|
||||
|
||||
assert result_paths == ["/archive/2024/feb.log", "/archive/2024/jan.log"]
|
||||
|
||||
@@ -580,3 +580,417 @@ async def test_composite_adownload_preserves_original_paths_async(tmp_path: Path
|
||||
# Response should have the original composite path, not stripped
|
||||
assert responses[0].path == "/subdir/file.bin"
|
||||
assert responses[0].content == b"Nested file"
|
||||
|
||||
|
||||
async def test_composite_agrep_targeting_specific_route_async(tmp_path: Path) -> None:
|
||||
"""Test async grep with path targeting a specific routed backend."""
|
||||
rt = make_runtime("t_agrep1")
|
||||
root = tmp_path
|
||||
|
||||
# Setup filesystem backend with some files
|
||||
(root / "default.txt").write_text("default backend content")
|
||||
(root / "default2.txt").write_text("more default stuff")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
# Write to memories route
|
||||
await comp.awrite("/memories/note1.txt", "memory content alpha")
|
||||
await comp.awrite("/memories/note2.txt", "memory content beta")
|
||||
|
||||
# Grep with path="/memories/" should only search memories backend
|
||||
matches = await comp.agrep_raw("memory", path="/memories/")
|
||||
assert isinstance(matches, list)
|
||||
match_paths = [m["path"] for m in matches]
|
||||
|
||||
# Should find matches in /memories/
|
||||
assert any("/memories/note1.txt" in p for p in match_paths)
|
||||
assert any("/memories/note2.txt" in p for p in match_paths)
|
||||
|
||||
# Should NOT find matches in default backend
|
||||
assert not any("/default" in p for p in match_paths)
|
||||
|
||||
|
||||
async def test_composite_agrep_with_glob_filter_async(tmp_path: Path) -> None:
|
||||
"""Test async grep with glob parameter to filter files."""
|
||||
rt = make_runtime("t_agrep2")
|
||||
root = tmp_path
|
||||
|
||||
# Create files with different extensions
|
||||
(root / "script.py").write_text("python code here")
|
||||
(root / "config.json").write_text("json config here")
|
||||
(root / "readme.md").write_text("markdown docs here")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
# Add some files to memories route
|
||||
await comp.awrite("/memories/notes.py", "python notes here")
|
||||
await comp.awrite("/memories/data.json", "json data here")
|
||||
|
||||
# Grep with glob="*.py" should only search Python files
|
||||
matches = await comp.agrep_raw("here", path="/", glob="*.py")
|
||||
assert isinstance(matches, list)
|
||||
match_paths = [m["path"] for m in matches]
|
||||
|
||||
# Should find .py files
|
||||
assert any("/script.py" in p for p in match_paths)
|
||||
assert any("/memories/notes.py" in p for p in match_paths)
|
||||
|
||||
# Should NOT find non-.py files
|
||||
assert not any(".json" in p for p in match_paths)
|
||||
assert not any(".md" in p for p in match_paths)
|
||||
|
||||
|
||||
async def test_composite_agrep_with_glob_in_specific_route_async(tmp_path: Path) -> None:
|
||||
"""Test async grep with glob parameter targeting a specific route."""
|
||||
rt = make_runtime("t_agrep3")
|
||||
root = tmp_path
|
||||
|
||||
(root / "local.md").write_text("local markdown")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
# Add files to memories
|
||||
await comp.awrite("/memories/important.md", "important notes")
|
||||
await comp.awrite("/memories/data.txt", "text data")
|
||||
|
||||
# Grep memories with glob="*.md"
|
||||
matches = await comp.agrep_raw("notes", path="/memories/", glob="*.md")
|
||||
assert isinstance(matches, list)
|
||||
match_paths = [m["path"] for m in matches]
|
||||
|
||||
# Should find .md file in memories
|
||||
assert any("/memories/important.md" in p for p in match_paths)
|
||||
|
||||
# Should NOT find .txt files or default backend files
|
||||
assert not any("/memories/data.txt" in p for p in match_paths)
|
||||
assert not any("/local.md" in p for p in match_paths)
|
||||
|
||||
|
||||
async def test_composite_agrep_with_path_none_async(tmp_path: Path) -> None:
|
||||
"""Test async grep with path=None behaves like path='/'."""
|
||||
rt = make_runtime("t_agrep4")
|
||||
root = tmp_path
|
||||
|
||||
(root / "file1.txt").write_text("searchable content")
|
||||
|
||||
fs = FilesystemBackend(root_dir=str(root), virtual_mode=True)
|
||||
store = StoreBackend(rt)
|
||||
|
||||
comp = CompositeBackend(default=fs, routes={"/memories/": store})
|
||||
|
||||
await comp.awrite("/memories/file2.txt", "searchable memory")
|
||||
|
||||
# Grep with path=None
|
||||
matches_none = await comp.agrep_raw("searchable", path=None)
|
||||
assert isinstance(matches_none, list)
|
||||
|
||||
# Grep with path="/"
|
||||
matches_root = await comp.agrep_raw("searchable", path="/")
|
||||
assert isinstance(matches_root, list)
|
||||
|
||||
# Both should return same results
|
||||
paths_none = sorted([m["path"] for m in matches_none])
|
||||
paths_root = sorted([m["path"] for m in matches_root])
|
||||
|
||||
assert paths_none == paths_root
|
||||
assert len(paths_none) == 2
|
||||
|
||||
|
||||
async def test_composite_agrep_invalid_regex_async(tmp_path: Path) -> None:
    """An unparseable regex should come back as an error string, not a list."""
    rt = make_runtime("t_agrep5")

    comp = CompositeBackend(
        default=FilesystemBackend(root_dir=str(tmp_path), virtual_mode=True),
        routes={},
    )

    outcome = await comp.agrep_raw("[invalid(", path="/")
    assert isinstance(outcome, str)
    assert "Invalid regex" in outcome or "error" in outcome.lower()
|
||||
|
||||
|
||||
async def test_composite_agrep_nested_path_in_route_async(tmp_path: Path) -> None:
    """Grep restricted to a nested directory inside a routed backend."""
    rt = make_runtime("t_agrep6")
    (tmp_path / "local.txt").write_text("local content")

    comp = CompositeBackend(
        default=FilesystemBackend(root_dir=str(tmp_path), virtual_mode=True),
        routes={"/memories/": StoreBackend(rt)},
    )

    # Build a nested structure inside the routed backend.
    for virtual_path, body in (
        ("/memories/docs/readme.md", "documentation here"),
        ("/memories/docs/guide.md", "guide here"),
        ("/memories/notes.txt", "notes here"),
    ):
        await comp.awrite(virtual_path, body)

    hits = await comp.agrep_raw("here", path="/memories/docs/")
    assert isinstance(hits, list)
    hit_paths = [hit["path"] for hit in hits]

    # Only files under /memories/docs/ may match.
    assert any("/memories/docs/readme.md" in p for p in hit_paths)
    assert any("/memories/docs/guide.md" in p for p in hit_paths)
    assert not any("/memories/notes.txt" in p for p in hit_paths)
    assert not any("/local.txt" in p for p in hit_paths)
|
||||
|
||||
|
||||
async def test_composite_agrep_empty_results_async(tmp_path: Path) -> None:
    """A pattern that matches nothing yields an empty list, not an error."""
    rt = make_runtime("t_agrep7")
    (tmp_path / "file.txt").write_text("some content")

    comp = CompositeBackend(
        default=FilesystemBackend(root_dir=str(tmp_path), virtual_mode=True),
        routes={"/memories/": StoreBackend(rt)},
    )
    await comp.awrite("/memories/note.txt", "memory content")

    hits = await comp.agrep_raw("nonexistent_pattern_xyz", path="/")
    assert isinstance(hits, list)
    assert len(hits) == 0
|
||||
|
||||
|
||||
async def test_composite_agrep_route_prefix_restoration_async(tmp_path: Path) -> None:
    """Route prefixes must be re-attached to result paths without double slashes."""
    rt = make_runtime("t_agrep8")

    comp = CompositeBackend(
        default=FilesystemBackend(root_dir=str(tmp_path), virtual_mode=True),
        routes={"/memories/": StoreBackend(rt)},
    )
    await comp.awrite("/memories/alpha.txt", "test content alpha")
    await comp.awrite("/memories/beta.txt", "test content beta")

    # Route-scoped search: every hit carries the clean route prefix.
    scoped = await comp.agrep_raw("test", path="/memories/")
    assert isinstance(scoped, list)
    assert len(scoped) > 0
    for hit in scoped:
        hit_path = hit["path"]
        assert hit_path.startswith("/memories/")
        assert not hit_path.startswith("/memories//")  # No double slashes

    # Root-wide search: memory hits still carry the proper prefix.
    everywhere = await comp.agrep_raw("test", path="/")
    assert isinstance(everywhere, list)
    for hit in everywhere:
        if "/memories/" in hit["path"]:
            assert hit["path"].startswith("/memories/")
|
||||
|
||||
|
||||
async def test_composite_agrep_multiple_matches_per_file_async(tmp_path: Path) -> None:
    """Several matching lines in one file produce one hit per line."""
    rt = make_runtime("t_agrep9")

    # Two of the three lines match the pattern.
    (tmp_path / "multi.txt").write_text("line1 pattern\nline2 pattern\nline3 other")

    comp = CompositeBackend(
        default=FilesystemBackend(root_dir=str(tmp_path), virtual_mode=True),
        routes={},
    )

    hits = await comp.agrep_raw("pattern", path="/")
    assert isinstance(hits, list)

    from_multi = [h for h in hits if "multi.txt" in h["path"]]
    assert len(from_multi) == 2
    # Line numbers are 1-based and correspond to the matching lines.
    assert sorted(h["line"] for h in from_multi) == [1, 2]
|
||||
|
||||
|
||||
@pytest.mark.xfail(
    reason="StoreBackend instances share the same underlying store when using the same runtime, "
    "causing files written to one route to appear in all routes that use the same backend instance. "
    "This violates the expected isolation between routes."
)
async def test_composite_agrep_multiple_routes_aggregation_async(tmp_path: Path) -> None:
    """Root-level grep aggregates hits from every backend with route isolation.

    Encodes the intuitive expectation: files written to /memories/ should only
    appear under /memories/, and files written to /archive/ only under /archive/.
    """
    rt = make_runtime("t_agrep10")
    (tmp_path / "default.txt").write_text("default findme")

    comp = CompositeBackend(
        default=FilesystemBackend(root_dir=str(tmp_path), virtual_mode=True),
        routes={"/memories/": StoreBackend(rt), "/archive/": StoreBackend(rt)},
    )

    # One file per route.
    await comp.awrite("/memories/mem.txt", "memory findme")
    await comp.awrite("/archive/arch.txt", "archive findme")

    hits = await comp.agrep_raw("findme", path="/")
    assert isinstance(hits, list)

    # Each file must show up exactly once, under its own route.
    assert sorted(h["path"] for h in hits) == [
        "/archive/arch.txt",
        "/default.txt",
        "/memories/mem.txt",
    ]
|
||||
|
||||
|
||||
async def test_composite_agrep_error_in_routed_backend_async() -> None:
    """An error string from a routed backend is propagated unchanged."""
    rt = make_runtime("t_agrep_err1")

    class FailingGrepBackend(StoreBackend):
        # Simulates a backend whose grep always reports a regex failure.
        async def agrep_raw(self, pattern: str, path: str | None = None, glob: str | None = None):
            return "Invalid regex pattern error"

    comp = CompositeBackend(
        default=StateBackend(rt),
        routes={"/errors/": FailingGrepBackend(rt)},
    )

    # Searching the failing route surfaces its error string verbatim.
    outcome = await comp.agrep_raw("test", path="/errors/")
    assert outcome == "Invalid regex pattern error"
|
||||
|
||||
|
||||
async def test_composite_agrep_error_in_routed_backend_at_root_async() -> None:
    """A routed backend's error string also surfaces during a root-wide search."""
    rt = make_runtime("t_agrep_err2")

    class FailingGrepBackend(StoreBackend):
        # Simulates a backend whose grep always fails.
        async def agrep_raw(self, pattern: str, path: str | None = None, glob: str | None = None):
            return "Backend error occurred"

    comp = CompositeBackend(
        default=StateBackend(rt),
        routes={"/errors/": FailingGrepBackend(rt)},
    )

    # Searching from root visits the failing route and returns its error.
    outcome = await comp.agrep_raw("test", path="/")
    assert outcome == "Backend error occurred"
|
||||
|
||||
|
||||
async def test_composite_agrep_error_in_default_backend_at_root_async() -> None:
    """The default backend's error string surfaces during a root-wide search."""
    rt = make_runtime("t_agrep_err3")

    class FailingDefaultBackend(StateBackend):
        # Simulates a default backend whose grep always fails.
        async def agrep_raw(self, pattern: str, path: str | None = None, glob: str | None = None):
            return "Default backend error"

    comp = CompositeBackend(
        default=FailingDefaultBackend(rt),
        routes={"/store/": StoreBackend(rt)},
    )

    outcome = await comp.agrep_raw("test", path="/")
    assert outcome == "Default backend error"
|
||||
|
||||
|
||||
async def test_composite_agrep_non_root_path_on_default_backend_async(tmp_path: Path) -> None:
    """Grep scoped to a subdirectory of the default backend stays inside it."""
    rt = make_runtime("t_agrep_default")

    # Nested layout: one file inside /work, one outside.
    (tmp_path / "work").mkdir()
    (tmp_path / "work" / "project.txt").write_text("project content")
    (tmp_path / "other.txt").write_text("other content")

    comp = CompositeBackend(
        default=FilesystemBackend(root_dir=str(tmp_path), virtual_mode=True),
        routes={"/memories/": StoreBackend(rt)},
    )

    # /work matches no route, so the default backend serves it.
    hits = await comp.agrep_raw("content", path="/work")
    assert [h["path"] for h in hits] == ["/work/project.txt"]
|
||||
|
||||
|
||||
async def test_composite_aglob_info_targeting_specific_route_async() -> None:
    """aglob_info scoped to one route only sees that route's files."""
    rt = make_runtime("t_aglob1")
    default_backend = StateBackend(rt)

    comp = CompositeBackend(default=default_backend, routes={"/memories/": StoreBackend(rt)})

    # Populate the routed backend with mixed file types.
    await comp.awrite("/memories/test.py", "python file")
    await comp.awrite("/memories/data.json", "json file")
    await comp.awrite("/memories/docs/readme.md", "markdown file")

    # A .py file on the default backend must not leak into the route result.
    await default_backend.awrite("/local.py", "local python")

    infos = await comp.aglob_info("**/*.py", path="/memories/")
    assert [info["path"] for info in infos] == ["/memories/test.py"]
|
||||
|
||||
|
||||
async def test_composite_aglob_info_nested_path_in_route_async() -> None:
    """aglob_info honors a nested directory inside a routed backend."""
    rt = make_runtime("t_aglob2")

    comp = CompositeBackend(default=StateBackend(rt), routes={"/archive/": StoreBackend(rt)})

    # Nested archive layout with one stray non-log file.
    for virtual_path, body in (
        ("/archive/2024/jan.log", "january logs"),
        ("/archive/2024/feb.log", "february logs"),
        ("/archive/2023/dec.log", "december logs"),
        ("/archive/notes.txt", "general notes"),
    ):
        await comp.awrite(virtual_path, body)

    # Only .log files directly under /archive/2024/ qualify.
    infos = await comp.aglob_info("*.log", path="/archive/2024/")
    assert sorted(info["path"] for info in infos) == [
        "/archive/2024/feb.log",
        "/archive/2024/jan.log",
    ]
|
||||
|
||||
@@ -23,6 +23,7 @@ class GenericFakeChatModel(BaseChatModel):
|
||||
* Invokes `on_llm_new_token` to allow for testing of callback related code for new
|
||||
tokens.
|
||||
* Includes configurable logic to break messages into chunks for streaming.
|
||||
* Tracks all invoke calls for inspection (messages, kwargs)
|
||||
|
||||
Args:
|
||||
messages: An iterator over messages (use `iter()` to convert a list)
|
||||
@@ -50,6 +51,12 @@ class GenericFakeChatModel(BaseChatModel):
|
||||
stream_delimiter=r"(\\s)"
|
||||
)
|
||||
# Yields: "Hello", " ", "world"
|
||||
|
||||
# Access call history
|
||||
model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
|
||||
model.invoke([HumanMessage(content="Hi")])
|
||||
print(model.call_history[0]["messages"])
|
||||
print(model.call_history[0]["kwargs"])
|
||||
"""
|
||||
|
||||
messages: Iterator[AIMessage | str]
|
||||
@@ -62,6 +69,8 @@ class GenericFakeChatModel(BaseChatModel):
|
||||
if you want to pass a list, you can use `iter` to convert it to an iterator.
|
||||
"""
|
||||
|
||||
call_history: list[Any] = []
|
||||
|
||||
stream_delimiter: str | None = None
|
||||
"""Delimiter for chunking content during streaming.
|
||||
|
||||
@@ -70,6 +79,10 @@ class GenericFakeChatModel(BaseChatModel):
|
||||
- Regex pattern: Use re.split() with the pattern (use capture groups to preserve delimiters)
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs: Any) -> None:
|
||||
"""Initialize the fake chat model with call tracking."""
|
||||
super().__init__(**kwargs)
|
||||
|
||||
def bind_tools(
|
||||
self,
|
||||
tools: Sequence[dict[str, Any] | type | Callable | BaseTool],
|
||||
@@ -88,6 +101,18 @@ class GenericFakeChatModel(BaseChatModel):
|
||||
run_manager: CallbackManagerForLLMRun | None = None,
|
||||
**kwargs: Any,
|
||||
) -> ChatResult:
|
||||
# Record this call
|
||||
self.call_history.append(
|
||||
{
|
||||
"messages": messages,
|
||||
"kwargs": {
|
||||
"stop": stop,
|
||||
"run_manager": run_manager,
|
||||
**kwargs,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
message = next(self.messages)
|
||||
message_ = AIMessage(content=message) if isinstance(message, str) else message
|
||||
generation = ChatGeneration(message=message_)
|
||||
|
||||
@@ -0,0 +1,848 @@
|
||||
"""Unit tests for memory middleware with FilesystemBackend.
|
||||
|
||||
This module tests the memory middleware using end-to-end tests with fake chat models
|
||||
and temporary directories with the FilesystemBackend in normal (non-virtual) mode.
|
||||
"""
|
||||
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
|
||||
from langchain.agents import create_agent
|
||||
from langchain_core.messages import AIMessage, HumanMessage
|
||||
from langchain_core.runnables import RunnableConfig
|
||||
from langgraph.checkpoint.memory import InMemorySaver
|
||||
from langgraph.store.memory import InMemoryStore
|
||||
|
||||
from deepagents.backends.filesystem import FilesystemBackend
|
||||
from deepagents.backends.state import StateBackend
|
||||
from deepagents.backends.store import StoreBackend
|
||||
from deepagents.graph import create_deep_agent
|
||||
from deepagents.middleware.memory import MemoryMiddleware
|
||||
from tests.unit_tests.chat_model import GenericFakeChatModel
|
||||
|
||||
|
||||
def make_memory_content(title: str, content: str) -> str:
    """Build an AGENTS.md document from a title and a body.

    Args:
        title: Heading placed at the top of the file.
        content: Markdown body that follows the heading.

    Returns:
        The assembled AGENTS.md text: a '# <title>' heading, a blank
        line, the body, and a trailing newline.
    """
    return f"# {title}\n\n{content}\n"
|
||||
|
||||
|
||||
def create_store_memory_item(content: str) -> dict:
|
||||
"""Create a memory item in StoreBackend FileData format.
|
||||
|
||||
Args:
|
||||
content: Memory content string
|
||||
|
||||
Returns:
|
||||
Dict with content (as list of lines), created_at, and modified_at
|
||||
"""
|
||||
timestamp = datetime.now(UTC).isoformat()
|
||||
return {
|
||||
"content": content.split("\n"),
|
||||
"created_at": timestamp,
|
||||
"modified_at": timestamp,
|
||||
}
|
||||
|
||||
|
||||
def test_format_agent_memory_empty() -> None:
    """An empty contents mapping renders the 'No memory loaded' placeholder."""
    mw = MemoryMiddleware(
        backend=None,  # type: ignore
        sources=["/test/AGENTS.md"],
    )
    rendered = mw._format_agent_memory({})

    for fragment in ("<agent_memory>", "</agent_memory>", "No memory loaded"):
        assert fragment in rendered
|
||||
|
||||
|
||||
def test_format_agent_memory_empty_sources() -> None:
    """With no sources configured the placeholder text is rendered."""
    mw = MemoryMiddleware(
        backend=None,  # type: ignore
        sources=[],
    )
    rendered = mw._format_agent_memory({})

    for fragment in ("<agent_memory>", "</agent_memory>", "No memory loaded"):
        assert fragment in rendered
|
||||
|
||||
|
||||
def test_format_agent_memory_single() -> None:
    """A single source renders its location immediately followed by its content."""
    mw = MemoryMiddleware(
        backend=None,  # type: ignore
        sources=["/user/AGENTS.md"],
    )
    rendered = mw._format_agent_memory({"/user/AGENTS.md": "# User Memory\nBe helpful."})

    assert "<agent_memory>" in rendered
    assert "</agent_memory>" in rendered
    # Both the location and its content must be present.
    assert "/user/AGENTS.md" in rendered
    assert "# User Memory" in rendered
    assert "Be helpful." in rendered
    # The location header precedes the body it introduces.
    assert rendered.find("/user/AGENTS.md") < rendered.find("# User Memory")
|
||||
|
||||
|
||||
def test_format_agent_memory_multiple() -> None:
    """Every configured source appears with its own content."""
    mw = MemoryMiddleware(
        backend=None,  # type: ignore
        sources=[
            "/user/AGENTS.md",
            "/project/AGENTS.md",
        ],
    )
    rendered = mw._format_agent_memory(
        {
            "/user/AGENTS.md": "User preferences here",
            "/project/AGENTS.md": "Project guidelines here",
        }
    )

    for fragment in (
        "<agent_memory>",
        "</agent_memory>",
        "/user/AGENTS.md",
        "User preferences here",
        "/project/AGENTS.md",
        "Project guidelines here",
    ):
        assert fragment in rendered
|
||||
|
||||
|
||||
def test_format_agent_memory_preserves_order() -> None:
    """Rendering order follows the sources list, not dict insertion order."""
    mw = MemoryMiddleware(
        backend=None,  # type: ignore
        sources=[
            "/first/AGENTS.md",
            "/second/AGENTS.md",
        ],
    )
    # Deliberately insert the contents in the opposite order.
    rendered = mw._format_agent_memory(
        {"/second/AGENTS.md": "Second content", "/first/AGENTS.md": "First content"}
    )

    first_at = rendered.find("First content")
    second_at = rendered.find("Second content")
    assert first_at >= 0  # present
    assert second_at > 0  # present, not at the very start
    assert first_at < second_at  # sources order wins over dict order
|
||||
|
||||
|
||||
def test_format_agent_memory_skips_missing_sources() -> None:
    """Sources with no loaded content are omitted from the rendering entirely."""
    mw = MemoryMiddleware(
        backend=None,  # type: ignore
        sources=[
            "/user/AGENTS.md",
            "/project/AGENTS.md",
        ],
    )
    # Content exists for the user source only.
    rendered = mw._format_agent_memory({"/user/AGENTS.md": "User content only"})

    assert "<agent_memory>" in rendered
    assert "/user/AGENTS.md" in rendered
    assert "User content only" in rendered
    # The content-less source must not be mentioned at all.
    assert "/project/AGENTS.md" not in rendered
|
||||
|
||||
|
||||
def test_format_agent_memory_location_content_pairing() -> None:
    """Each location header is immediately followed by its own body."""
    mw = MemoryMiddleware(
        backend=None,  # type: ignore
        sources=[
            "/first/AGENTS.md",
            "/second/AGENTS.md",
        ],
    )
    rendered = mw._format_agent_memory(
        {
            "/first/AGENTS.md": "First content here",
            "/second/AGENTS.md": "Second content here",
        }
    )

    loc1 = rendered.find("/first/AGENTS.md")
    body1 = rendered.find("First content here")
    loc2 = rendered.find("/second/AGENTS.md")
    body2 = rendered.find("Second content here")

    # Strictly interleaved: loc1 < body1 < loc2 < body2.
    assert loc1 < body1 < loc2 < body2
|
||||
|
||||
|
||||
def test_load_memory_from_backend_single_source(tmp_path: Path) -> None:
    """before_agent loads one memory file through the filesystem backend."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # Seed a memory file via the backend's upload interface.
    memory_path = str(tmp_path / "user" / "AGENTS.md")
    body = make_memory_content(
        "User Preferences",
        """- Always use type hints
- Prefer functional patterns
- Be concise""",
    )
    uploads = backend.upload_files([(memory_path, body.encode("utf-8"))])
    assert uploads[0].error is None

    mw = MemoryMiddleware(backend=backend, sources=[memory_path])

    # before_agent must pull the file content into state.
    update = mw.before_agent({}, None, {})  # type: ignore

    assert update is not None
    assert "memory_contents" in update
    loaded = update["memory_contents"]
    assert memory_path in loaded
    assert "type hints" in loaded[memory_path]
    assert "functional patterns" in loaded[memory_path]
|
||||
|
||||
|
||||
def test_load_memory_from_backend_multiple_sources(tmp_path: Path) -> None:
    """before_agent loads every configured source."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    user_path = str(tmp_path / "user" / "AGENTS.md")
    project_path = str(tmp_path / "project" / "AGENTS.md")

    user_body = make_memory_content("User Preferences", "- Use Python 3.11+\n- Follow PEP 8")
    project_body = make_memory_content("Project Guidelines", "## Architecture\nThis is a FastAPI project.")

    uploads = backend.upload_files(
        [
            (user_path, user_body.encode("utf-8")),
            (project_path, project_body.encode("utf-8")),
        ]
    )
    assert all(u.error is None for u in uploads)

    mw = MemoryMiddleware(backend=backend, sources=[user_path, project_path])

    update = mw.before_agent({}, None, {})  # type: ignore

    assert update is not None
    assert "memory_contents" in update
    loaded = update["memory_contents"]
    # Both sources loaded, each with its own content.
    assert user_path in loaded
    assert project_path in loaded
    assert "Python 3.11" in loaded[user_path]
    assert "FastAPI" in loaded[project_path]
|
||||
|
||||
|
||||
def test_load_memory_handles_missing_file(tmp_path: Path) -> None:
    """Test that missing source files are skipped instead of raising.

    The middleware silently omits sources whose files do not exist;
    only the existing source ends up in ``memory_contents``.
    """
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # Create only one of two memory files
    user_path = str(tmp_path / "user" / "AGENTS.md")
    missing_path = str(tmp_path / "nonexistent" / "AGENTS.md")

    user_content = make_memory_content("User Preferences", "- Be helpful")
    backend.upload_files([(user_path, user_content.encode("utf-8"))])

    # Create middleware with existing and missing sources
    sources: list[str] = [
        missing_path,
        user_path,
    ]
    middleware = MemoryMiddleware(backend=backend, sources=sources)

    # Test before_agent loads only existing memory
    result = middleware.before_agent({}, None, {})  # type: ignore
    assert result is not None
    # The missing source is absent; the existing one is loaded normally.
    assert missing_path not in result["memory_contents"]
    assert user_path in result["memory_contents"]
|
||||
|
||||
|
||||
def test_before_agent_skips_if_already_loaded(tmp_path: Path) -> None:
    """Memory already present in state is not reloaded by before_agent."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    user_path = str(tmp_path / "user" / "AGENTS.md")
    body = make_memory_content("User Preferences", "- Some content")
    backend.upload_files([(user_path, body.encode("utf-8"))])

    mw = MemoryMiddleware(backend=backend, sources=[user_path])

    # State already carries loaded memory; before_agent must not touch it.
    preloaded_state = {"memory_contents": {user_path: "Already loaded content"}}
    update = mw.before_agent(preloaded_state, None, {})  # type: ignore

    # None signals "no state update needed".
    assert update is None
|
||||
|
||||
|
||||
def test_load_memory_with_empty_sources(tmp_path: Path) -> None:
    """No sources configured yields an empty (but present) contents mapping."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)
    mw = MemoryMiddleware(backend=backend, sources=[])

    update = mw.before_agent({}, None, {})  # type: ignore

    assert update is not None
    assert update["memory_contents"] == {}
|
||||
|
||||
|
||||
def test_memory_content_with_special_characters(tmp_path: Path) -> None:
    """Markdown/markup special characters survive the load round-trip."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    memory_path = str(tmp_path / "test" / "AGENTS.md")
    body = make_memory_content(
        "Special Characters",
        """- Use `backticks` for code
- <xml> tags should work
- "Quotes" and 'apostrophes'
- {braces} and [brackets]""",
    )
    backend.upload_files([(memory_path, body.encode("utf-8"))])

    mw = MemoryMiddleware(
        backend=backend,
        sources=[memory_path],
    )
    update = mw.before_agent({}, None, {})  # type: ignore

    assert update is not None
    loaded = update["memory_contents"][memory_path]
    for token in ("`backticks`", "<xml>", '"Quotes"', "{braces}"):
        assert token in loaded
|
||||
|
||||
|
||||
def test_memory_content_with_unicode(tmp_path: Path) -> None:
    """Non-ASCII text (CJK, emoji, math symbols) survives the load round-trip."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    memory_path = str(tmp_path / "test" / "AGENTS.md")
    body = make_memory_content(
        "Unicode Content",
        """- 日本語 (Japanese)
- 中文 (Chinese)
- Emoji: 🚀 🎉 ✨
- Math: ∀x∈ℝ, x² ≥ 0""",
    )
    backend.upload_files([(memory_path, body.encode("utf-8"))])

    mw = MemoryMiddleware(
        backend=backend,
        sources=[memory_path],
    )
    update = mw.before_agent({}, None, {})  # type: ignore

    assert update is not None
    loaded = update["memory_contents"][memory_path]
    for token in ("日本語", "中文", "🚀", "∀x∈ℝ"):
        assert token in loaded
|
||||
|
||||
|
||||
def test_memory_content_with_large_file(tmp_path: Path) -> None:
    """A large (~10KB) memory file is loaded in full."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    memory_path = str(tmp_path / "test" / "AGENTS.md")
    # 500 repeated lines give roughly 10KB of content.
    big_body = make_memory_content("Large Memory", "Line of content\n" * 500)
    backend.upload_files([(memory_path, big_body.encode("utf-8"))])

    mw = MemoryMiddleware(
        backend=backend,
        sources=[memory_path],
    )
    update = mw.before_agent({}, None, {})  # type: ignore

    assert update is not None
    loaded = update["memory_contents"][memory_path]
    # Every repeated line must have made it through.
    assert loaded.count("Line of content") == 500
|
||||
|
||||
|
||||
def test_agent_with_memory_middleware_system_prompt(tmp_path: Path) -> None:
    """The middleware injects loaded memory into the model's system prompt."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    memory_path = str(tmp_path / "user" / "AGENTS.md")
    body = make_memory_content(
        "User Preferences",
        """- Always use type hints
- Prefer functional programming
- Be concise""",
    )
    uploads = backend.upload_files([(memory_path, body.encode("utf-8"))])
    assert uploads[0].error is None

    # A fake model records every call so the prompt can be inspected.
    fake_model = GenericFakeChatModel(messages=iter([AIMessage(content="I understand your preferences.")]))

    mw = MemoryMiddleware(backend=backend, sources=[memory_path])

    agent = create_agent(
        model=fake_model,
        middleware=[mw],
    )
    outcome = agent.invoke({"messages": [HumanMessage(content="Hello")]})

    # The agent ran end to end.
    assert "messages" in outcome
    assert len(outcome["messages"]) > 0

    assert len(fake_model.call_history) > 0, "Model should have been called at least once"

    recorded_messages = fake_model.call_history[0]["messages"]
    system_message = recorded_messages[0]
    assert system_message.type == "system", "First message should be system prompt"

    prompt = system_message.text
    assert "<agent_memory>" in prompt, "System prompt should contain <agent_memory> tags"
    assert memory_path in prompt, "System prompt should contain memory path"
    assert "type hints" in prompt, "System prompt should mention memory content"
    assert "functional programming" in prompt
|
||||
|
||||
|
||||
def test_agent_with_memory_middleware_multiple_sources(tmp_path: Path) -> None:
    """Test agent with memory from multiple sources."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # Write two AGENTS.md files so the middleware has two sources to merge.
    user_path = str(tmp_path / "user" / "AGENTS.md")
    project_path = str(tmp_path / "project" / "AGENTS.md")
    uploads = [
        (user_path, make_memory_content("User Style", "- Use Python 3.11+").encode("utf-8")),
        (project_path, make_memory_content("Project Info", "- FastAPI backend").encode("utf-8")),
    ]
    for response in backend.upload_files(uploads):
        assert response.error is None

    fake_model = GenericFakeChatModel(
        messages=iter([AIMessage(content="I see both user and project preferences.")])
    )
    middleware = MemoryMiddleware(backend=backend, sources=[user_path, project_path])
    agent = create_agent(model=fake_model, middleware=[middleware])

    result = agent.invoke({"messages": [HumanMessage(content="Help me")]})
    assert "messages" in result
    assert len(result["messages"]) > 0

    # The first model call's system prompt should embed both memory files.
    prompt = fake_model.call_history[0]["messages"][0].text
    assert "<agent_memory>" in prompt
    assert user_path in prompt
    assert project_path in prompt
    assert "Python 3.11" in prompt
    assert "FastAPI" in prompt
|
||||
|
||||
|
||||
def test_agent_with_memory_middleware_empty_sources(tmp_path: Path) -> None:
    """Test that agent works with empty memory sources."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)
    fake_model = GenericFakeChatModel(messages=iter([AIMessage(content="Working without memory.")]))

    # No sources at all: the middleware should still inject an (empty) memory section.
    agent = create_agent(
        model=fake_model,
        middleware=[MemoryMiddleware(backend=backend, sources=[])],
    )
    result = agent.invoke({"messages": [HumanMessage(content="Hello")]})

    assert "messages" in result
    assert len(result["messages"]) > 0

    # Even with nothing loaded, the prompt carries the placeholder section.
    prompt = fake_model.call_history[0]["messages"][0].text
    assert "<agent_memory>" in prompt
    assert "No memory loaded" in prompt
|
||||
|
||||
|
||||
async def test_agent_with_memory_middleware_async(tmp_path: Path) -> None:
    """Test that memory middleware works with async agent invocation."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    memory_path = str(tmp_path / "user" / "AGENTS.md")
    payload = make_memory_content("Async Test", "- Test async loading").encode("utf-8")
    assert backend.upload_files([(memory_path, payload)])[0].error is None

    fake_model = GenericFakeChatModel(
        messages=iter([AIMessage(content="Async invocation successful.")])
    )
    agent = create_agent(
        model=fake_model,
        middleware=[MemoryMiddleware(backend=backend, sources=[memory_path])],
    )

    result = await agent.ainvoke({"messages": [HumanMessage(content="Hello")]})
    assert "messages" in result
    assert len(result["messages"]) > 0

    # memory_contents is middleware-private state and must not leak into the result.
    assert "memory_contents" not in result

    # The injected system prompt should carry both the memory path and its body.
    prompt = fake_model.call_history[0]["messages"][0].text
    assert "<agent_memory>" in prompt
    assert memory_path in prompt
    assert "Test async loading" in prompt
|
||||
|
||||
|
||||
def test_memory_middleware_with_state_backend_factory() -> None:
    """Test that MemoryMiddleware can be initialized with StateBackend factory."""
    middleware = MemoryMiddleware(
        backend=lambda rt: StateBackend(rt),
        sources=["/memory/AGENTS.md"],
    )

    # The factory is stored as a callable and the sources list is kept as given.
    assert middleware is not None
    assert callable(middleware._backend)
    assert middleware.sources == ["/memory/AGENTS.md"]

    # Minimal stand-ins for agent state and Runtime — just enough for _get_backend.
    state = {"messages": [], "files": {}}
    runtime = SimpleNamespace(
        context=None,
        store=None,
        stream_writer=lambda _: None,
    )

    resolved = middleware._get_backend(state, runtime, {})  # type: ignore
    assert isinstance(resolved, StateBackend)
    assert resolved.runtime is not None
|
||||
|
||||
|
||||
def test_memory_middleware_with_store_backend_factory() -> None:
    """Test that MemoryMiddleware can be initialized with StoreBackend factory."""
    middleware = MemoryMiddleware(
        backend=lambda rt: StoreBackend(rt),
        sources=["/memory/AGENTS.md"],
    )
    assert middleware is not None
    assert callable(middleware._backend)

    # Runtime stub carrying a real in-memory store for the backend to wrap.
    runtime = SimpleNamespace(
        context=None,
        store=InMemoryStore(),
        stream_writer=lambda _: None,
    )

    resolved = middleware._get_backend({"messages": []}, runtime, {})  # type: ignore
    assert isinstance(resolved, StoreBackend)
    assert resolved.runtime is not None
|
||||
|
||||
|
||||
def test_memory_middleware_with_store_backend_assistant_id() -> None:
    """Test namespace isolation: each assistant_id gets its own memory namespace."""
    # Setup: middleware resolves a StoreBackend lazily from the runtime's store.
    middleware = MemoryMiddleware(
        backend=lambda rt: StoreBackend(rt),
        sources=["/memory/AGENTS.md"],
    )
    store = InMemoryStore()
    # Minimal Runtime stand-in — only the attributes the middleware touches.
    runtime = SimpleNamespace(context=None, store=store, stream_writer=lambda _: None)

    # Add memory for assistant-123 with namespace (assistant-123, filesystem)
    assistant_1_content = make_memory_content("Assistant 1", "- Context for assistant 1")
    store.put(
        ("assistant-123", "filesystem"),
        "/memory/AGENTS.md",
        create_store_memory_item(assistant_1_content),
    )

    # Test: assistant-123 can read its own memory
    # (the assistant_id is taken from config metadata — see before_agent)
    config_1 = {"metadata": {"assistant_id": "assistant-123"}}
    result_1 = middleware.before_agent({}, runtime, config_1)  # type: ignore

    assert result_1 is not None
    assert "/memory/AGENTS.md" in result_1["memory_contents"]
    assert "Context for assistant 1" in result_1["memory_contents"]["/memory/AGENTS.md"]

    # Test: assistant-456 cannot see assistant-123's memory (different namespace)
    config_2 = {"metadata": {"assistant_id": "assistant-456"}}
    result_2 = middleware.before_agent({}, runtime, config_2)  # type: ignore
    assert result_2 is not None
    assert len(result_2["memory_contents"]) == 0

    # Add memory for assistant-456 with namespace (assistant-456, filesystem)
    assistant_2_content = make_memory_content("Assistant 2", "- Context for assistant 2")
    store.put(
        ("assistant-456", "filesystem"),
        "/memory/AGENTS.md",
        create_store_memory_item(assistant_2_content),
    )

    # Test: assistant-456 can read its own memory
    result_3 = middleware.before_agent({}, runtime, config_2)  # type: ignore

    assert result_3 is not None
    assert "/memory/AGENTS.md" in result_3["memory_contents"]
    assert "Context for assistant 2" in result_3["memory_contents"]["/memory/AGENTS.md"]
    assert "Context for assistant 1" not in result_3["memory_contents"]["/memory/AGENTS.md"]

    # Test: assistant-123 still only sees its own memory (no cross-contamination)
    result_4 = middleware.before_agent({}, runtime, config_1)  # type: ignore

    assert result_4 is not None
    assert "/memory/AGENTS.md" in result_4["memory_contents"]
    assert "Context for assistant 1" in result_4["memory_contents"]["/memory/AGENTS.md"]
    assert "Context for assistant 2" not in result_4["memory_contents"]["/memory/AGENTS.md"]
|
||||
|
||||
|
||||
def test_memory_middleware_with_store_backend_no_assistant_id() -> None:
    """Test default namespace: when no assistant_id is provided, uses (filesystem,) namespace."""
    middleware = MemoryMiddleware(
        backend=lambda rt: StoreBackend(rt),
        sources=["/memory/AGENTS.md"],
    )
    store = InMemoryStore()
    runtime = SimpleNamespace(context=None, store=store, stream_writer=lambda _: None)

    # Seed the default (filesystem,) namespace — no assistant_id involved.
    store.put(
        ("filesystem",),
        "/memory/AGENTS.md",
        create_store_memory_item(
            make_memory_content("Shared Memory", "- Default namespace context")
        ),
    )

    # An empty config reads from the default namespace.
    loaded = middleware.before_agent({}, runtime, {})  # type: ignore
    assert loaded is not None
    assert "/memory/AGENTS.md" in loaded["memory_contents"]
    assert "Default namespace context" in loaded["memory_contents"]["/memory/AGENTS.md"]

    # Metadata without an assistant_id key falls back to the default namespace too.
    other_config = {"metadata": {"some_other_key": "value"}}
    loaded_again = middleware.before_agent({}, runtime, other_config)  # type: ignore
    assert loaded_again is not None
    assert "/memory/AGENTS.md" in loaded_again["memory_contents"]
    assert "Default namespace context" in loaded_again["memory_contents"]["/memory/AGENTS.md"]
|
||||
|
||||
|
||||
def test_create_deep_agent_with_memory_and_filesystem_backend(tmp_path: Path) -> None:
    """Test end-to-end: create_deep_agent with memory parameter and FilesystemBackend."""
    # Create memory on filesystem
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)
    memory_path = str(tmp_path / "user" / "AGENTS.md")
    memory_content = make_memory_content("Deep Agent Test", "- Use deep agents wisely")

    # Fail fast if the fixture upload did not succeed (every sibling test checks
    # this); otherwise a later failure would point at the agent, not the setup.
    responses = backend.upload_files([(memory_path, memory_content.encode("utf-8"))])
    assert responses[0].error is None

    # Create agent with the memory parameter wired to the filesystem backend.
    agent = create_deep_agent(
        backend=backend,
        memory=[memory_path],
        model=GenericFakeChatModel(messages=iter([AIMessage(content="Memory loaded successfully.")])),
    )

    # Invoke agent and verify the invocation succeeded.
    result = agent.invoke({"messages": [HumanMessage(content="What do you know?")]})
    assert "messages" in result
    assert len(result["messages"]) > 0
|
||||
|
||||
|
||||
def test_create_deep_agent_with_memory_missing_files(tmp_path: Path) -> None:
    """Test that memory works gracefully when files don't exist."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # Point the memory parameter at a path that was never created.
    missing = str(tmp_path / "nonexistent" / "AGENTS.md")
    agent = create_deep_agent(
        backend=backend,
        memory=[missing],
        model=GenericFakeChatModel(messages=iter([AIMessage(content="No memory, but that's okay.")])),
    )

    # The agent should still run; a missing memory file is not fatal.
    result = agent.invoke({"messages": [HumanMessage(content="Hello")]})
    assert "messages" in result
|
||||
|
||||
|
||||
def test_create_deep_agent_with_memory_default_backend() -> None:
    """Test create_deep_agent with memory parameter using default backend (StateBackend).

    When no backend is specified, StateBackend is used by tools. The MemoryMiddleware
    should receive a StateBackend factory and be able to load memory from state files.
    """
    # Checkpointer lets us inspect persisted channel values after the run.
    checkpointer = InMemorySaver()
    agent = create_deep_agent(
        memory=["/user/.deepagents/AGENTS.md"],
        model=GenericFakeChatModel(messages=iter([AIMessage(content="Working with default backend.")])),
        checkpointer=checkpointer,
    )

    # Create memory content
    memory_content = make_memory_content("User Memory", "- Be helpful and concise")
    timestamp = datetime.now(UTC).isoformat()

    # Prepare files dict with FileData format (for StateBackend):
    # content is stored as a list of lines plus created/modified timestamps.
    memory_files = {
        "/user/.deepagents/AGENTS.md": {
            "content": memory_content.split("\n"),
            "created_at": timestamp,
            "modified_at": timestamp,
        }
    }

    # thread_id pins the checkpoint so we can fetch it back below.
    config: RunnableConfig = {"configurable": {"thread_id": "123"}}

    # Invoke agent with files parameter
    result = agent.invoke(
        {
            "messages": [HumanMessage(content="What's in your memory?")],
            "files": memory_files,
        },
        config,
    )

    assert len(result["messages"]) > 0

    # Verify memory was loaded from state: the checkpoint's channel values should
    # retain both the original file and the middleware-populated memory_contents.
    checkpoint = agent.checkpointer.get(config)
    assert "/user/.deepagents/AGENTS.md" in checkpoint["channel_values"]["files"]
    assert "memory_contents" in checkpoint["channel_values"]
    assert "/user/.deepagents/AGENTS.md" in checkpoint["channel_values"]["memory_contents"]
|
||||
|
||||
|
||||
def test_memory_middleware_order_matters(tmp_path: Path) -> None:
    """Test that memory sources are combined in order."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # Two memory files whose bodies are easy to locate in the prompt.
    first_path = str(tmp_path / "first" / "AGENTS.md")
    second_path = str(tmp_path / "second" / "AGENTS.md")
    first_content = make_memory_content("First", "First memory content")
    second_content = make_memory_content("Second", "Second memory content")
    backend.upload_files(
        [
            (first_path, first_content.encode("utf-8")),
            (second_path, second_content.encode("utf-8")),
        ]
    )

    fake_model = GenericFakeChatModel(messages=iter([AIMessage(content="Understood.")]))

    # Middleware is given the sources in a specific order.
    middleware = MemoryMiddleware(backend=backend, sources=[first_path, second_path])
    agent = create_agent(model=fake_model, middleware=[middleware])

    result = agent.invoke({"messages": [HumanMessage(content="Test")]})
    # Previously `result` was computed but never checked; assert like the
    # sibling tests so a failed invocation is reported here, not downstream.
    assert "messages" in result
    assert len(result["messages"]) > 0

    # Both paths appear in the injected system prompt...
    content = fake_model.call_history[0]["messages"][0].text
    assert "<agent_memory>" in content
    assert first_path in content
    assert second_path in content

    # ...and their bodies appear in source order.
    first_pos = content.find("First memory content")
    second_pos = content.find("Second memory content")
    assert first_pos > 0
    assert second_pos > 0
    assert first_pos < second_pos
|
||||
@@ -0,0 +1,365 @@
|
||||
"""Async unit tests for memory middleware with FilesystemBackend.
|
||||
|
||||
This module contains async versions of memory middleware tests.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from langchain.agents import create_agent
|
||||
from langchain_core.messages import AIMessage, HumanMessage
|
||||
|
||||
from deepagents.backends.filesystem import FilesystemBackend
|
||||
from deepagents.middleware.memory import MemoryMiddleware
|
||||
from tests.unit_tests.chat_model import GenericFakeChatModel
|
||||
|
||||
|
||||
def make_memory_content(title: str, content: str) -> str:
    """Create AGENTS.md content.

    Args:
        title: Title for the memory file
        content: Content body

    Returns:
        Complete AGENTS.md content as string
    """
    # Markdown H1 heading, blank separator line, body, trailing newline.
    return f"# {title}\n\n{content}\n"
|
||||
|
||||
|
||||
async def test_load_memory_from_backend_single_source_async(tmp_path: Path) -> None:
    """Test loading memory from a single source using filesystem backend (async)."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # Create memory file via the backend's own upload interface.
    memory_path = str(tmp_path / "user" / "AGENTS.md")
    body = "- Always use type hints\n- Prefer functional patterns\n- Be concise"
    payload = make_memory_content("User Preferences", body).encode("utf-8")
    assert backend.upload_files([(memory_path, payload)])[0].error is None

    middleware = MemoryMiddleware(backend=backend, sources=[memory_path])

    # abefore_agent should populate memory_contents from the backend.
    update = await middleware.abefore_agent({}, None, {})  # type: ignore
    assert update is not None
    assert "memory_contents" in update
    assert memory_path in update["memory_contents"]
    loaded = update["memory_contents"][memory_path]
    assert "type hints" in loaded
    assert "functional patterns" in loaded
|
||||
|
||||
|
||||
async def test_load_memory_from_backend_multiple_sources_async(tmp_path: Path) -> None:
    """Test loading memory from multiple sources (async)."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # Two memory files uploaded in one batch.
    user_path = str(tmp_path / "user" / "AGENTS.md")
    project_path = str(tmp_path / "project" / "AGENTS.md")
    uploads = [
        (
            user_path,
            make_memory_content("User Preferences", "- Use Python 3.11+\n- Follow PEP 8").encode("utf-8"),
        ),
        (
            project_path,
            make_memory_content("Project Guidelines", "## Architecture\nThis is a FastAPI project.").encode("utf-8"),
        ),
    ]
    for response in backend.upload_files(uploads):
        assert response.error is None

    middleware = MemoryMiddleware(backend=backend, sources=[user_path, project_path])

    # abefore_agent should load every configured source.
    update = await middleware.abefore_agent({}, None, {})  # type: ignore
    assert update is not None
    assert "memory_contents" in update
    assert user_path in update["memory_contents"]
    assert project_path in update["memory_contents"]
    assert "Python 3.11" in update["memory_contents"][user_path]
    assert "FastAPI" in update["memory_contents"][project_path]
|
||||
|
||||
|
||||
async def test_load_memory_handles_missing_file_async(tmp_path: Path) -> None:
    """Test that missing memory files are silently skipped (async).

    Only sources that actually exist on the backend end up in
    ``memory_contents``; a nonexistent path does not raise.
    (The previous docstring claimed missing files raise an error,
    contradicting the assertions below.)
    """
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # Create only one of two memory files.
    user_path = str(tmp_path / "user" / "AGENTS.md")
    missing_path = str(tmp_path / "nonexistent" / "AGENTS.md")

    user_content = make_memory_content("User Preferences", "- Be helpful")
    backend.upload_files([(user_path, user_content.encode("utf-8"))])

    # Create middleware with existing and missing sources.
    sources: list[str] = [
        missing_path,
        user_path,
    ]
    middleware = MemoryMiddleware(backend=backend, sources=sources)

    # abefore_agent loads only the existing memory.
    result = await middleware.abefore_agent({}, None, {})  # type: ignore
    assert result is not None
    assert missing_path not in result["memory_contents"]
    assert user_path in result["memory_contents"]
|
||||
|
||||
|
||||
async def test_before_agent_skips_if_already_loaded_async(tmp_path: Path) -> None:
    """Test that abefore_agent doesn't reload if already in state."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    user_path = str(tmp_path / "user" / "AGENTS.md")
    backend.upload_files(
        [(user_path, make_memory_content("User Preferences", "- Some content").encode("utf-8"))]
    )

    middleware = MemoryMiddleware(backend=backend, sources=[user_path])

    # State already holds memory for this source; the hook should be a no-op.
    preloaded_state = {"memory_contents": {user_path: "Already loaded content"}}
    update = await middleware.abefore_agent(preloaded_state, None, {})  # type: ignore

    # None signals "no state update needed".
    assert update is None
|
||||
|
||||
|
||||
async def test_load_memory_with_empty_sources_async(tmp_path: Path) -> None:
    """Test middleware with empty sources list (async)."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)
    middleware = MemoryMiddleware(backend=backend, sources=[])

    # With nothing to load, the hook still reports an (empty) mapping.
    update = await middleware.abefore_agent({}, None, {})  # type: ignore
    assert update is not None
    assert update["memory_contents"] == {}
|
||||
|
||||
|
||||
async def test_memory_content_with_special_characters_async(tmp_path: Path) -> None:
    """Test that special characters in memory are handled (async)."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    memory_path = str(tmp_path / "test" / "AGENTS.md")
    body = (
        "- Use `backticks` for code\n"
        "- <xml> tags should work\n"
        "- \"Quotes\" and 'apostrophes'\n"
        "- {braces} and [brackets]"
    )
    backend.upload_files(
        [(memory_path, make_memory_content("Special Characters", body).encode("utf-8"))]
    )

    middleware = MemoryMiddleware(backend=backend, sources=[memory_path])

    update = await middleware.abefore_agent({}, None, {})  # type: ignore
    assert update is not None

    # None of the markup-ish characters should be mangled or escaped.
    loaded = update["memory_contents"][memory_path]
    assert "`backticks`" in loaded
    assert "<xml>" in loaded
    assert '"Quotes"' in loaded
    assert "{braces}" in loaded
|
||||
|
||||
|
||||
async def test_memory_content_with_unicode_async(tmp_path: Path) -> None:
    """Test that unicode characters in memory are handled (async)."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    memory_path = str(tmp_path / "test" / "AGENTS.md")
    body = (
        "- 日本語 (Japanese)\n"
        "- 中文 (Chinese)\n"
        "- Emoji: 🚀 🎉 ✨\n"
        "- Math: ∀x∈ℝ, x² ≥ 0"
    )
    backend.upload_files(
        [(memory_path, make_memory_content("Unicode Content", body).encode("utf-8"))]
    )

    middleware = MemoryMiddleware(backend=backend, sources=[memory_path])

    update = await middleware.abefore_agent({}, None, {})  # type: ignore
    assert update is not None

    # The UTF-8 round trip through the backend must preserve multibyte text exactly.
    loaded = update["memory_contents"][memory_path]
    assert "日本語" in loaded
    assert "中文" in loaded
    assert "🚀" in loaded
    assert "∀x∈ℝ" in loaded
|
||||
|
||||
|
||||
async def test_memory_content_with_large_file_async(tmp_path: Path) -> None:
    """Test that large memory files are loaded correctly (async)."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    memory_path = str(tmp_path / "test" / "AGENTS.md")
    # 500 repeated lines (~8 KB) — large enough to exercise full-file loading.
    large_content = make_memory_content("Large Memory", "Line of content\n" * 500)
    backend.upload_files([(memory_path, large_content.encode("utf-8"))])

    middleware = MemoryMiddleware(backend=backend, sources=[memory_path])

    update = await middleware.abefore_agent({}, None, {})  # type: ignore
    assert update is not None

    # Every repetition survived the round trip — nothing was truncated.
    loaded = update["memory_contents"][memory_path]
    assert loaded.count("Line of content") == 500
|
||||
|
||||
|
||||
async def test_agent_with_memory_middleware_multiple_sources_async(tmp_path: Path) -> None:
    """Test agent with memory from multiple sources (async)."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # Write two AGENTS.md files so the middleware has two sources to merge.
    user_path = str(tmp_path / "user" / "AGENTS.md")
    project_path = str(tmp_path / "project" / "AGENTS.md")
    uploads = [
        (user_path, make_memory_content("User Style", "- Use Python 3.11+").encode("utf-8")),
        (project_path, make_memory_content("Project Info", "- FastAPI backend").encode("utf-8")),
    ]
    for response in backend.upload_files(uploads):
        assert response.error is None

    fake_model = GenericFakeChatModel(
        messages=iter([AIMessage(content="I see both user and project preferences.")])
    )
    middleware = MemoryMiddleware(backend=backend, sources=[user_path, project_path])
    agent = create_agent(model=fake_model, middleware=[middleware])

    result = await agent.ainvoke({"messages": [HumanMessage(content="Help me")]})
    assert "messages" in result
    assert len(result["messages"]) > 0

    # The first model call's system prompt should embed both memory files.
    prompt = fake_model.call_history[0]["messages"][0].text
    assert "<agent_memory>" in prompt
    assert user_path in prompt
    assert project_path in prompt
    assert "Python 3.11" in prompt
    assert "FastAPI" in prompt
|
||||
|
||||
|
||||
async def test_agent_with_memory_middleware_empty_sources_async(tmp_path: Path) -> None:
    """Test that agent works with empty memory sources (async)."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)
    fake_model = GenericFakeChatModel(messages=iter([AIMessage(content="Working without memory.")]))

    # No sources: the middleware should still inject an (empty) memory section.
    agent = create_agent(
        model=fake_model,
        middleware=[MemoryMiddleware(backend=backend, sources=[])],
    )
    result = await agent.ainvoke({"messages": [HumanMessage(content="Hello")]})

    assert "messages" in result
    assert len(result["messages"]) > 0

    # Even with nothing loaded, the prompt carries the placeholder section.
    prompt = fake_model.call_history[0]["messages"][0].text
    assert "<agent_memory>" in prompt
    assert "No memory loaded" in prompt
|
||||
|
||||
|
||||
async def test_memory_middleware_order_matters_async(tmp_path: Path) -> None:
    """Test that memory sources are combined in order (async)."""
    backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # Two memory files whose bodies are easy to locate in the prompt.
    first_path = str(tmp_path / "first" / "AGENTS.md")
    second_path = str(tmp_path / "second" / "AGENTS.md")
    first_content = make_memory_content("First", "First memory content")
    second_content = make_memory_content("Second", "Second memory content")
    backend.upload_files(
        [
            (first_path, first_content.encode("utf-8")),
            (second_path, second_content.encode("utf-8")),
        ]
    )

    fake_model = GenericFakeChatModel(messages=iter([AIMessage(content="Understood.")]))

    # Middleware is given the sources in a specific order.
    middleware = MemoryMiddleware(backend=backend, sources=[first_path, second_path])
    agent = create_agent(model=fake_model, middleware=[middleware])

    result = await agent.ainvoke({"messages": [HumanMessage(content="Test")]})
    # Previously `result` was computed but never checked; assert like the
    # sibling tests so a failed invocation is reported here, not downstream.
    assert "messages" in result
    assert len(result["messages"]) > 0

    # Both paths appear in the injected system prompt...
    content = fake_model.call_history[0]["messages"][0].text
    assert "<agent_memory>" in content
    assert first_path in content
    assert second_path in content

    # ...and their bodies appear in source order.
    first_pos = content.find("First memory content")
    second_pos = content.find("Second memory content")
    assert first_pos > 0
    assert second_pos > 0
    assert first_pos < second_pos
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,406 @@
|
||||
"""Async unit tests for skills middleware with FilesystemBackend.
|
||||
|
||||
This module contains async versions of skills middleware tests.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from langchain.agents import create_agent
|
||||
from langchain_core.messages import AIMessage, HumanMessage
|
||||
|
||||
from deepagents.backends.filesystem import FilesystemBackend
|
||||
from deepagents.middleware.skills import SkillsMiddleware, _alist_skills
|
||||
from tests.unit_tests.chat_model import GenericFakeChatModel
|
||||
|
||||
|
||||
def make_skill_content(name: str, description: str) -> str:
    """Create SKILL.md content with YAML frontmatter.

    Args:
        name: Skill name for frontmatter
        description: Skill description for frontmatter

    Returns:
        Complete SKILL.md content as string
    """
    # YAML frontmatter block, then a title-cased heading and placeholder body.
    frontmatter = f"---\nname: {name}\ndescription: {description}\n---"
    heading = f"# {name.title()} Skill"
    return f"{frontmatter}\n\n{heading}\n\nInstructions go here.\n"
|
||||
|
||||
|
||||
async def test_alist_skills_from_backend_single_skill(tmp_path: Path) -> None:
    """Verify that a lone skill on disk is discovered via the backend (async)."""
    # Real filesystem backend rooted at the pytest tmp dir (no virtual mode).
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    skills_root = tmp_path / "skills"
    md_path = str(skills_root / "my-skill" / "SKILL.md")
    upload_results = fs_backend.upload_files(
        [(md_path, make_skill_content("my-skill", "My test skill").encode("utf-8"))]
    )
    assert upload_results[0].error is None

    # Discovery uses the absolute skills directory path.
    found = await _alist_skills(fs_backend, str(skills_root))

    assert found == [
        {
            "name": "my-skill",
            "description": "My test skill",
            "path": md_path,
            "metadata": {},
            "license": None,
            "compatibility": None,
            "allowed_tools": [],
        }
    ]
|
||||
|
||||
|
||||
async def test_alist_skills_from_backend_multiple_skills(tmp_path: Path) -> None:
    """Verify that every skill directory under the root is discovered (async)."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    skills_root = tmp_path / "skills"
    specs = [
        ("skill-one", "First skill"),
        ("skill-two", "Second skill"),
        ("skill-three", "Third skill"),
    ]
    uploads = [
        (str(skills_root / skill_name / "SKILL.md"), make_skill_content(skill_name, desc).encode("utf-8"))
        for skill_name, desc in specs
    ]
    upload_results = fs_backend.upload_files(uploads)
    assert all(r.error is None for r in upload_results)

    found = await _alist_skills(fs_backend, str(skills_root))

    # All three should come back; discovery order is not guaranteed.
    assert len(found) == 3
    assert {s["name"] for s in found} == {"skill-one", "skill-two", "skill-three"}
|
||||
|
||||
|
||||
async def test_alist_skills_from_backend_empty_directory(tmp_path: Path) -> None:
    """An existing but empty skills directory yields no skills (async)."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    skills_root = tmp_path / "skills"
    skills_root.mkdir()

    # Nothing to discover, but no error either.
    assert await _alist_skills(fs_backend, str(skills_root)) == []
|
||||
|
||||
|
||||
async def test_alist_skills_from_backend_nonexistent_path(tmp_path: Path) -> None:
    """A missing skills directory yields an empty list rather than an error (async)."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    missing_dir = tmp_path / "nonexistent"
    assert await _alist_skills(fs_backend, str(missing_dir)) == []
|
||||
|
||||
|
||||
async def test_alist_skills_from_backend_missing_skill_md(tmp_path: Path) -> None:
    """Directories lacking a SKILL.md file are skipped during discovery (async)."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    skills_root = tmp_path / "skills"
    valid_path = str(skills_root / "valid-skill" / "SKILL.md")
    # A directory containing only a stray file, no SKILL.md.
    stray_file = str(skills_root / "invalid-skill" / "readme.txt")

    fs_backend.upload_files(
        [
            (valid_path, make_skill_content("valid-skill", "Valid skill").encode("utf-8")),
            (stray_file, b"Not a skill file"),
        ]
    )

    found = await _alist_skills(fs_backend, str(skills_root))

    # Only the directory with a SKILL.md should be reported.
    assert found == [
        {
            "name": "valid-skill",
            "description": "Valid skill",
            "path": valid_path,
            "metadata": {},
            "license": None,
            "compatibility": None,
            "allowed_tools": [],
        }
    ]
|
||||
|
||||
|
||||
async def test_alist_skills_from_backend_invalid_frontmatter(tmp_path: Path) -> None:
    """Skills whose YAML frontmatter fails to parse are skipped (async)."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    skills_root = tmp_path / "skills"
    valid_path = str(skills_root / "valid-skill" / "SKILL.md")
    broken_path = str(skills_root / "invalid-skill" / "SKILL.md")

    # Deliberately malformed YAML (unterminated flow sequence).
    broken_content = """---
name: invalid-skill
description: [unclosed yaml
---

Content
"""
    fs_backend.upload_files(
        [
            (valid_path, make_skill_content("valid-skill", "Valid skill").encode("utf-8")),
            (broken_path, broken_content.encode("utf-8")),
        ]
    )

    found = await _alist_skills(fs_backend, str(skills_root))

    # Only the parseable skill should survive discovery.
    assert found == [
        {
            "name": "valid-skill",
            "description": "Valid skill",
            "path": valid_path,
            "metadata": {},
            "license": None,
            "compatibility": None,
            "allowed_tools": [],
        }
    ]
|
||||
|
||||
|
||||
async def test_abefore_agent_loads_skills(tmp_path: Path) -> None:
    """abefore_agent should populate skills_metadata from the backend."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    skills_root = tmp_path / "skills" / "user"
    fs_backend.upload_files(
        [
            (
                str(skills_root / "skill-one" / "SKILL.md"),
                make_skill_content("skill-one", "First skill").encode("utf-8"),
            ),
            (
                str(skills_root / "skill-two" / "SKILL.md"),
                make_skill_content("skill-two", "Second skill").encode("utf-8"),
            ),
        ]
    )

    middleware = SkillsMiddleware(backend=fs_backend, sources=[str(skills_root)])

    result = await middleware.abefore_agent({}, None, {})  # type: ignore

    assert result is not None
    assert "skills_metadata" in result
    assert len(result["skills_metadata"]) == 2
    assert {s["name"] for s in result["skills_metadata"]} == {"skill-one", "skill-two"}
|
||||
|
||||
|
||||
async def test_abefore_agent_skill_override(tmp_path: Path) -> None:
    """Skills from later sources replace same-named skills from earlier ones (async)."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # The same skill name appears in two sources; "user" is listed last.
    base_dir = tmp_path / "skills" / "base"
    user_dir = tmp_path / "skills" / "user"
    user_skill_path = str(user_dir / "shared-skill" / "SKILL.md")

    fs_backend.upload_files(
        [
            (
                str(base_dir / "shared-skill" / "SKILL.md"),
                make_skill_content("shared-skill", "Base description").encode("utf-8"),
            ),
            (
                user_skill_path,
                make_skill_content("shared-skill", "User description").encode("utf-8"),
            ),
        ]
    )

    middleware = SkillsMiddleware(
        backend=fs_backend,
        sources=[str(base_dir), str(user_dir)],
    )

    result = await middleware.abefore_agent({}, None, {})  # type: ignore

    assert result is not None
    assert len(result["skills_metadata"]) == 1

    # The later (user) source wins the name collision.
    assert result["skills_metadata"][0] == {
        "name": "shared-skill",
        "description": "User description",
        "path": user_skill_path,
        "metadata": {},
        "license": None,
        "compatibility": None,
        "allowed_tools": [],
    }
|
||||
|
||||
|
||||
async def test_abefore_agent_empty_sources(tmp_path: Path) -> None:
    """abefore_agent returns an empty metadata list when sources hold no skills (async)."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    user_dir = tmp_path / "skills" / "user"
    user_dir.mkdir(parents=True)

    middleware = SkillsMiddleware(backend=fs_backend, sources=[str(user_dir)])
    result = await middleware.abefore_agent({}, None, {})  # type: ignore

    assert result is not None
    assert result["skills_metadata"] == []
|
||||
|
||||
|
||||
async def test_abefore_agent_skips_loading_if_metadata_present(tmp_path: Path) -> None:
    """abefore_agent is a no-op when state already carries skills_metadata."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    # A skill exists on disk, but it must not be loaded in this test.
    skills_root = tmp_path / "skills" / "user"
    fs_backend.upload_files(
        [
            (
                str(skills_root / "test-skill" / "SKILL.md"),
                make_skill_content("test-skill", "A test skill").encode("utf-8"),
            )
        ]
    )

    middleware = SkillsMiddleware(backend=fs_backend, sources=[str(skills_root)])

    # Pre-populated metadata means the middleware must skip the disk scan.
    result = await middleware.abefore_agent({"skills_metadata": []}, None, {})  # type: ignore

    assert result is None
|
||||
|
||||
|
||||
async def test_agent_with_skills_middleware_multiple_sources_async(tmp_path: Path) -> None:
    """An agent advertises skills drawn from every configured source (async)."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    base_dir = tmp_path / "skills" / "base"
    user_dir = tmp_path / "skills" / "user"
    upload_results = fs_backend.upload_files(
        [
            (
                str(base_dir / "base-skill" / "SKILL.md"),
                make_skill_content("base-skill", "Base skill description").encode("utf-8"),
            ),
            (
                str(user_dir / "user-skill" / "SKILL.md"),
                make_skill_content("user-skill", "User skill description").encode("utf-8"),
            ),
        ]
    )
    assert all(r.error is None for r in upload_results)

    model = GenericFakeChatModel(messages=iter([AIMessage(content="I see both skills.")]))
    middleware = SkillsMiddleware(
        backend=fs_backend,
        sources=[str(base_dir), str(user_dir)],
    )
    agent = create_agent(model=model, middleware=[middleware])

    result = await agent.ainvoke({"messages": [HumanMessage(content="Help me")]})

    assert "messages" in result
    assert len(result["messages"]) > 0

    # Both skills should appear in the system prompt of the first model call.
    prompt_text = model.call_history[0]["messages"][0].text
    assert "base-skill" in prompt_text
    assert "user-skill" in prompt_text
|
||||
|
||||
|
||||
async def test_agent_with_skills_middleware_empty_sources_async(tmp_path: Path) -> None:
    """An agent still runs — and advertises no skills — when the source dir is empty (async)."""
    fs_backend = FilesystemBackend(root_dir=str(tmp_path), virtual_mode=False)

    skills_root = tmp_path / "skills"
    skills_root.mkdir()

    model = GenericFakeChatModel(messages=iter([AIMessage(content="Working without skills.")]))
    agent = create_agent(
        model=model,
        middleware=[SkillsMiddleware(backend=fs_backend, sources=[str(skills_root)])],
    )

    result = await agent.ainvoke({"messages": [HumanMessage(content="Hello")]})

    assert "messages" in result
    assert len(result["messages"]) > 0

    # The Skills System section is rendered even with nothing to list.
    prompt_text = model.call_history[0]["messages"][0].text
    assert "Skills System" in prompt_text
    assert "No skills available" in prompt_text
|
||||
@@ -957,6 +957,144 @@ class TestFilesystemMiddleware:
|
||||
assert isinstance(result, Command)
|
||||
assert "/large_tool_results/test_call_id" in result.update["files"]
|
||||
|
||||
def test_intercept_content_block_with_large_text(self):
|
||||
"""Test that content blocks with large text get evicted and converted to string."""
|
||||
from langgraph.types import Command
|
||||
|
||||
middleware = FilesystemMiddleware(tool_token_limit_before_evict=100)
|
||||
state = FilesystemState(messages=[], files={})
|
||||
runtime = ToolRuntime(state=state, context=None, tool_call_id="test_cb", store=None, stream_writer=lambda _: None, config={})
|
||||
|
||||
# Create list with content block with large text
|
||||
content_blocks = [{"type": "text", "text": "x" * 5000}]
|
||||
tool_message = ToolMessage(content=content_blocks, tool_call_id="test_cb")
|
||||
result = middleware._intercept_large_tool_result(tool_message, runtime)
|
||||
|
||||
assert isinstance(result, Command)
|
||||
assert "/large_tool_results/test_cb" in result.update["files"]
|
||||
# After eviction, content is always converted to plain string
|
||||
returned_content = result.update["messages"][0].content
|
||||
assert isinstance(returned_content, str)
|
||||
assert "Tool result too large" in returned_content
|
||||
|
||||
def test_intercept_content_block_with_small_text(self):
|
||||
"""Test that content blocks with small text are not evicted."""
|
||||
middleware = FilesystemMiddleware(tool_token_limit_before_evict=1000)
|
||||
state = FilesystemState(messages=[], files={})
|
||||
runtime = ToolRuntime(state=state, context=None, tool_call_id="test_small_cb", store=None, stream_writer=lambda _: None, config={})
|
||||
|
||||
# Create list with content block with small text
|
||||
content_blocks = [{"type": "text", "text": "small text"}]
|
||||
tool_message = ToolMessage(content=content_blocks, tool_call_id="test_small_cb")
|
||||
result = middleware._intercept_large_tool_result(tool_message, runtime)
|
||||
|
||||
# Should return original message unchanged
|
||||
assert result == tool_message
|
||||
assert result.content == content_blocks
|
||||
|
||||
def test_intercept_content_block_non_text_type(self):
|
||||
"""Test that content blocks with non-text type get evicted if large when stringified."""
|
||||
from langgraph.types import Command
|
||||
|
||||
middleware = FilesystemMiddleware(tool_token_limit_before_evict=100)
|
||||
state = FilesystemState(messages=[], files={})
|
||||
runtime = ToolRuntime(state=state, context=None, tool_call_id="test_other", store=None, stream_writer=lambda _: None, config={})
|
||||
|
||||
# Create list with content block with different type that's large when stringified
|
||||
content_blocks = [{"type": "image", "data": "x" * 5000}]
|
||||
tool_message = ToolMessage(content=content_blocks, tool_call_id="test_other")
|
||||
result = middleware._intercept_large_tool_result(tool_message, runtime)
|
||||
|
||||
# All content types are evicted if large when converted to string
|
||||
assert isinstance(result, Command)
|
||||
assert "/large_tool_results/test_other" in result.update["files"]
|
||||
|
||||
def test_intercept_list_content_gets_evicted_if_large(self):
|
||||
"""Test that list content gets evicted if large when stringified."""
|
||||
from langgraph.types import Command
|
||||
|
||||
middleware = FilesystemMiddleware(tool_token_limit_before_evict=100)
|
||||
state = FilesystemState(messages=[], files={})
|
||||
runtime = ToolRuntime(state=state, context=None, tool_call_id="test_list", store=None, stream_writer=lambda _: None, config={})
|
||||
|
||||
# Create list content that's large when stringified
|
||||
list_content = [{"key": "x" * 1000} for _ in range(50)]
|
||||
tool_message = ToolMessage(content=list_content, tool_call_id="test_list")
|
||||
result = middleware._intercept_large_tool_result(tool_message, runtime)
|
||||
|
||||
# List content is evicted if large when converted to string
|
||||
assert isinstance(result, Command)
|
||||
assert "/large_tool_results/test_list" in result.update["files"]
|
||||
|
||||
def test_single_text_block_extracts_text_directly(self):
|
||||
"""Test that single text block extracts text content directly, not stringified structure."""
|
||||
from langgraph.types import Command
|
||||
|
||||
middleware = FilesystemMiddleware(tool_token_limit_before_evict=100)
|
||||
state = FilesystemState(messages=[], files={})
|
||||
runtime = ToolRuntime(state=state, context=None, tool_call_id="test_single", store=None, stream_writer=lambda _: None, config={})
|
||||
|
||||
# Create single text block with large text
|
||||
content_blocks = [{"type": "text", "text": "Hello world! " * 1000}]
|
||||
tool_message = ToolMessage(content=content_blocks, tool_call_id="test_single")
|
||||
result = middleware._intercept_large_tool_result(tool_message, runtime)
|
||||
|
||||
assert isinstance(result, Command)
|
||||
# Check that the file contains actual text, not stringified dict
|
||||
file_content = result.update["files"]["/large_tool_results/test_single"]["content"]
|
||||
file_text = "\n".join(file_content)
|
||||
# Should start with the actual text, not with "[{" which would indicate stringified dict
|
||||
assert file_text.startswith("Hello world!")
|
||||
assert not file_text.startswith("[{")
|
||||
|
||||
def test_multiple_text_blocks_stringifies_structure(self):
|
||||
"""Test that multiple text blocks stringify entire structure."""
|
||||
from langgraph.types import Command
|
||||
|
||||
middleware = FilesystemMiddleware(tool_token_limit_before_evict=100)
|
||||
state = FilesystemState(messages=[], files={})
|
||||
runtime = ToolRuntime(state=state, context=None, tool_call_id="test_multi", store=None, stream_writer=lambda _: None, config={})
|
||||
|
||||
# Create multiple text blocks
|
||||
content_blocks = [
|
||||
{"type": "text", "text": "First block " * 500},
|
||||
{"type": "text", "text": "Second block " * 500},
|
||||
]
|
||||
tool_message = ToolMessage(content=content_blocks, tool_call_id="test_multi")
|
||||
result = middleware._intercept_large_tool_result(tool_message, runtime)
|
||||
|
||||
assert isinstance(result, Command)
|
||||
# Check that the file contains stringified structure (starts with "[")
|
||||
file_content = result.update["files"]["/large_tool_results/test_multi"]["content"]
|
||||
file_text = "\n".join(file_content)
|
||||
# Should be stringified list of dicts
|
||||
assert file_text.startswith("[{")
|
||||
|
||||
def test_mixed_content_blocks_stringifies_all(self):
|
||||
"""Test that mixed content block types (text + image) stringify entire structure."""
|
||||
from langgraph.types import Command
|
||||
|
||||
middleware = FilesystemMiddleware(tool_token_limit_before_evict=100)
|
||||
state = FilesystemState(messages=[], files={})
|
||||
runtime = ToolRuntime(state=state, context=None, tool_call_id="test_mixed", store=None, stream_writer=lambda _: None, config={})
|
||||
|
||||
# Create mixed content blocks
|
||||
content_blocks = [
|
||||
{"type": "text", "text": "Some text " * 200},
|
||||
{"type": "image", "url": "https://example.com/image.png"},
|
||||
]
|
||||
tool_message = ToolMessage(content=content_blocks, tool_call_id="test_mixed")
|
||||
result = middleware._intercept_large_tool_result(tool_message, runtime)
|
||||
|
||||
assert isinstance(result, Command)
|
||||
# Check that the file contains stringified structure
|
||||
file_content = result.update["files"]["/large_tool_results/test_mixed"]["content"]
|
||||
file_text = "\n".join(file_content)
|
||||
assert file_text.startswith("[{")
|
||||
# Should contain both blocks in the stringified output
|
||||
assert "'type': 'text'" in file_text
|
||||
assert "'type': 'image'" in file_text
|
||||
|
||||
def test_execute_tool_returns_error_when_backend_doesnt_support(self):
|
||||
"""Test that execute tool returns friendly error instead of raising exception."""
|
||||
state = FilesystemState(messages=[], files={})
|
||||
|
||||
@@ -0,0 +1,99 @@
|
||||
"""Tests for TodoListMiddleware functionality.
|
||||
|
||||
This module contains tests for the todo list middleware, focusing on how it handles
|
||||
write_todos tool calls, state management, and edge cases.
|
||||
"""
|
||||
|
||||
from langchain.agents import create_agent
|
||||
from langchain.agents.middleware import TodoListMiddleware
|
||||
from langchain_core.messages import AIMessage, HumanMessage
|
||||
|
||||
from tests.unit_tests.chat_model import GenericFakeChatModel
|
||||
|
||||
|
||||
class TestTodoMiddleware:
    """Tests for TodoListMiddleware behavior."""

    def test_todo_middleware_rejects_multiple_write_todos_in_same_message(self) -> None:
        """Parallel write_todos calls within one AIMessage are both rejected.

        The middleware must enforce the system-prompt constraint that
        `write_todos` is called at most once per model invocation: when two
        calls arrive in the same AIMessage, each receives an error
        ToolMessage, no todos are recorded, and the agent can recover and
        continue afterwards.
        """
        # Two write_todos tool calls packed into a single model response.
        first_call = {
            "name": "write_todos",
            "args": {
                "todos": [
                    {
                        "content": "First task",
                        "status": "in_progress",
                        "activeForm": "Working on first task",
                    },
                ]
            },
            "id": "call_write_todos_1",
            "type": "tool_call",
        }
        second_call = {
            "name": "write_todos",
            "args": {
                "todos": [
                    {
                        "content": "First task",
                        "status": "completed",
                        "activeForm": "Working on first task",
                    },
                    {
                        "content": "Second task",
                        "status": "pending",
                        "activeForm": "Working on second task",
                    },
                ]
            },
            "id": "call_write_todos_2",
            "type": "tool_call",
        }
        model = GenericFakeChatModel(
            messages=iter(
                [
                    # First response: both write_todos calls at once.
                    AIMessage(content="", tool_calls=[first_call, second_call]),
                    # Second response: the model wraps up after the errors.
                    AIMessage(content="Both tasks have been planned successfully."),
                ]
            )
        )

        agent = create_agent(
            model=model,
            middleware=[TodoListMiddleware()],
        )

        result = agent.invoke({"messages": [HumanMessage(content="Plan the work")]})

        # Each parallel call should have been answered with an error ToolMessage.
        tool_messages = [msg for msg in result["messages"] if msg.type == "tool"]
        assert len(tool_messages) == 2, f"Expected 2 error messages, got {len(tool_messages)}"

        expected_error = "Error: The `write_todos` tool should never be called multiple times in parallel. Please call it only once per model invocation to update the todo list."
        for tool_msg in tool_messages:
            assert tool_msg.content == expected_error, f"Expected exact error message, got: {tool_msg.content}"
            assert tool_msg.status == "error", f"Tool message status should be 'error', got: {tool_msg.status}"

        # Neither rejected call may have written any todos.
        assert result.get("todos", []) == [], "Todos should be empty when parallel writes are rejected"
||||
282
deepagents_sourcecode/libs/deepagents/uv.lock
generated
282
deepagents_sourcecode/libs/deepagents/uv.lock
generated
@@ -18,7 +18,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "anthropic"
|
||||
version = "0.74.1"
|
||||
version = "0.75.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
@@ -30,9 +30,9 @@ dependencies = [
|
||||
{ name = "sniffio" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d7/7b/609eea5c54ae69b1a4a94169d4b0c86dc5c41b43509989913f6cdc61b81d/anthropic-0.74.1.tar.gz", hash = "sha256:04c087b2751385c524f6d332d066a913870e4de8b3e335fb0a0c595f1f88dc6e", size = 428981, upload-time = "2025-11-19T22:17:31.533Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/04/1f/08e95f4b7e2d35205ae5dcbb4ae97e7d477fc521c275c02609e2931ece2d/anthropic-0.75.0.tar.gz", hash = "sha256:e8607422f4ab616db2ea5baacc215dd5f028da99ce2f022e33c7c535b29f3dfb", size = 439565, upload-time = "2025-11-24T20:41:45.28Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/45/6b18d0692302b8cbc01a10c35b43953d3c4172fbd4f83337b8ed21a8eaa4/anthropic-0.74.1-py3-none-any.whl", hash = "sha256:b07b998d1cee7f41d9f02530597d7411672b362cc2417760a40c0167b81c6e65", size = 371473, upload-time = "2025-11-19T22:17:29.998Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/1c/1cd02b7ae64302a6e06724bf80a96401d5313708651d277b1458504a1730/anthropic-0.75.0-py3-none-any.whl", hash = "sha256:ea8317271b6c15d80225a9f3c670152746e88805a7a61e14d4a374577164965b", size = 388164, upload-time = "2025-11-24T20:41:43.587Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -81,15 +81,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cb/8c/2b30c12155ad8de0cf641d76a8b396a16d2c36bc6d50b621a62b7c4567c1/build-1.3.0-py3-none-any.whl", hash = "sha256:7145f0b5061ba90a1500d60bd1b13ca0a8a4cebdd0cc16ed8adf1c0e739f43b4", size = 23382, upload-time = "2025-08-01T21:27:07.844Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cachetools"
|
||||
version = "6.2.4"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "certifi"
|
||||
version = "2025.11.12"
|
||||
@@ -368,7 +359,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "deepagents"
|
||||
version = "0.3.1"
|
||||
version = "0.3.5"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "langchain" },
|
||||
@@ -395,10 +386,10 @@ test = [
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "langchain", specifier = ">=1.1.0,<2.0.0" },
|
||||
{ name = "langchain-anthropic", specifier = ">=1.2.0,<2.0.0" },
|
||||
{ name = "langchain-core", specifier = ">=1.1.0,<2.0.0" },
|
||||
{ name = "langchain-google-genai" },
|
||||
{ name = "langchain", specifier = ">=1.2.3,<2.0.0" },
|
||||
{ name = "langchain-anthropic", specifier = ">=1.3.1,<2.0.0" },
|
||||
{ name = "langchain-core", specifier = ">=1.2.6,<2.0.0" },
|
||||
{ name = "langchain-google-genai", specifier = ">=4.1.3,<5.0.0" },
|
||||
{ name = "wcmatch" },
|
||||
]
|
||||
|
||||
@@ -462,133 +453,43 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970, upload-time = "2022-11-02T17:34:01.425Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "google-ai-generativelanguage"
|
||||
version = "0.9.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "google-api-core", extra = ["grpc"] },
|
||||
{ name = "google-auth" },
|
||||
{ name = "grpcio" },
|
||||
{ name = "proto-plus" },
|
||||
{ name = "protobuf" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ad/7e/67fdc46187541ead599e77f259d915f129c2f49568ebf5cadb322130712b/google_ai_generativelanguage-0.9.0.tar.gz", hash = "sha256:2524748f413917446febc8e0879dc0d4f026a064f89f17c42b81bea77ab76c84", size = 1481662, upload-time = "2025-10-20T14:56:23.123Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/91/c2d39ad5d77813afadb0f0b8789d882d15c191710b6b6f7cb158376342ff/google_ai_generativelanguage-0.9.0-py3-none-any.whl", hash = "sha256:59f61e54cb341e602073098389876594c4d12e458617727558bb2628a86f3eb2", size = 1401288, upload-time = "2025-10-20T14:52:58.403Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "google-api-core"
|
||||
version = "2.28.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "google-auth" },
|
||||
{ name = "googleapis-common-protos" },
|
||||
{ name = "proto-plus" },
|
||||
{ name = "protobuf" },
|
||||
{ name = "requests" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/61/da/83d7043169ac2c8c7469f0e375610d78ae2160134bf1b80634c482fa079c/google_api_core-2.28.1.tar.gz", hash = "sha256:2b405df02d68e68ce0fbc138559e6036559e685159d148ae5861013dc201baf8", size = 176759, upload-time = "2025-10-28T21:34:51.529Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ed/d4/90197b416cb61cefd316964fd9e7bd8324bcbafabf40eef14a9f20b81974/google_api_core-2.28.1-py3-none-any.whl", hash = "sha256:4021b0f8ceb77a6fb4de6fde4502cecab45062e66ff4f2895169e0b35bc9466c", size = 173706, upload-time = "2025-10-28T21:34:50.151Z" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
grpc = [
|
||||
{ name = "grpcio" },
|
||||
{ name = "grpcio-status" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "google-auth"
|
||||
version = "2.45.0"
|
||||
version = "2.47.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "cachetools" },
|
||||
{ name = "pyasn1-modules" },
|
||||
{ name = "rsa" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e5/00/3c794502a8b892c404b2dea5b3650eb21bfc7069612fbfd15c7f17c1cb0d/google_auth-2.45.0.tar.gz", hash = "sha256:90d3f41b6b72ea72dd9811e765699ee491ab24139f34ebf1ca2b9cc0c38708f3", size = 320708, upload-time = "2025-12-15T22:58:42.889Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/60/3c/ec64b9a275ca22fa1cd3b6e77fefcf837b0732c890aa32d2bd21313d9b33/google_auth-2.47.0.tar.gz", hash = "sha256:833229070a9dfee1a353ae9877dcd2dec069a8281a4e72e72f77d4a70ff945da", size = 323719, upload-time = "2026-01-06T21:55:31.045Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c6/97/451d55e05487a5cd6279a01a7e34921858b16f7dc8aa38a2c684743cd2b3/google_auth-2.45.0-py2.py3-none-any.whl", hash = "sha256:82344e86dc00410ef5382d99be677c6043d72e502b625aa4f4afa0bdacca0f36", size = 233312, upload-time = "2025-12-15T22:58:40.777Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/db/18/79e9008530b79527e0d5f79e7eef08d3b179b7f851cfd3a2f27822fbdfa9/google_auth-2.47.0-py3-none-any.whl", hash = "sha256:c516d68336bfde7cf0da26aab674a36fedcf04b37ac4edd59c597178760c3498", size = 234867, upload-time = "2026-01-06T21:55:28.6Z" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
requests = [
|
||||
{ name = "requests" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "googleapis-common-protos"
|
||||
version = "1.72.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "protobuf" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grpcio"
|
||||
version = "1.76.0"
|
||||
name = "google-genai"
|
||||
version = "1.57.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
{ name = "distro" },
|
||||
{ name = "google-auth", extra = ["requests"] },
|
||||
{ name = "httpx" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "requests" },
|
||||
{ name = "sniffio" },
|
||||
{ name = "tenacity" },
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "websockets" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b6/e0/318c1ce3ae5a17894d5791e87aea147587c9e702f24122cc7a5c8bbaeeb1/grpcio-1.76.0.tar.gz", hash = "sha256:7be78388d6da1a25c0d5ec506523db58b18be22d9c37d8d3a32c08be4987bd73", size = 12785182, upload-time = "2025-10-21T16:23:12.106Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2b/b4/8251c2d2576224a4b51a8ab6159820f9200b8da28ff555c78ee15607096e/google_genai-1.57.0.tar.gz", hash = "sha256:0ff9c36b8d68abfbdbd13b703ece926de5f3e67955666b36315ecf669b94a826", size = 485648, upload-time = "2026-01-07T20:38:20.271Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/a0/00/8163a1beeb6971f66b4bbe6ac9457b97948beba8dd2fc8e1281dce7f79ec/grpcio-1.76.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:2e1743fbd7f5fa713a1b0a8ac8ebabf0ec980b5d8809ec358d488e273b9cf02a", size = 5843567, upload-time = "2025-10-21T16:20:52.829Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/10/c1/934202f5cf335e6d852530ce14ddb0fef21be612ba9ecbbcbd4d748ca32d/grpcio-1.76.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:a8c2cf1209497cf659a667d7dea88985e834c24b7c3b605e6254cbb5076d985c", size = 11848017, upload-time = "2025-10-21T16:20:56.705Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/11/0b/8dec16b1863d74af6eb3543928600ec2195af49ca58b16334972f6775663/grpcio-1.76.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:08caea849a9d3c71a542827d6df9d5a69067b0a1efbea8a855633ff5d9571465", size = 6412027, upload-time = "2025-10-21T16:20:59.3Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d7/64/7b9e6e7ab910bea9d46f2c090380bab274a0b91fb0a2fe9b0cd399fffa12/grpcio-1.76.0-cp311-cp311-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f0e34c2079d47ae9f6188211db9e777c619a21d4faba6977774e8fa43b085e48", size = 7075913, upload-time = "2025-10-21T16:21:01.645Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/68/86/093c46e9546073cefa789bd76d44c5cb2abc824ca62af0c18be590ff13ba/grpcio-1.76.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8843114c0cfce61b40ad48df65abcfc00d4dba82eae8718fab5352390848c5da", size = 6615417, upload-time = "2025-10-21T16:21:03.844Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f7/b6/5709a3a68500a9c03da6fb71740dcdd5ef245e39266461a03f31a57036d8/grpcio-1.76.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8eddfb4d203a237da6f3cc8a540dad0517d274b5a1e9e636fd8d2c79b5c1d397", size = 7199683, upload-time = "2025-10-21T16:21:06.195Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/91/d3/4b1f2bf16ed52ce0b508161df3a2d186e4935379a159a834cb4a7d687429/grpcio-1.76.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:32483fe2aab2c3794101c2a159070584e5db11d0aa091b2c0ea9c4fc43d0d749", size = 8163109, upload-time = "2025-10-21T16:21:08.498Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5c/61/d9043f95f5f4cf085ac5dd6137b469d41befb04bd80280952ffa2a4c3f12/grpcio-1.76.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:dcfe41187da8992c5f40aa8c5ec086fa3672834d2be57a32384c08d5a05b4c00", size = 7626676, upload-time = "2025-10-21T16:21:10.693Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/36/95/fd9a5152ca02d8881e4dd419cdd790e11805979f499a2e5b96488b85cf27/grpcio-1.76.0-cp311-cp311-win32.whl", hash = "sha256:2107b0c024d1b35f4083f11245c0e23846ae64d02f40b2b226684840260ed054", size = 3997688, upload-time = "2025-10-21T16:21:12.746Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/9c/5c359c8d4c9176cfa3c61ecd4efe5affe1f38d9bae81e81ac7186b4c9cc8/grpcio-1.76.0-cp311-cp311-win_amd64.whl", hash = "sha256:522175aba7af9113c48ec10cc471b9b9bd4f6ceb36aeb4544a8e2c80ed9d252d", size = 4709315, upload-time = "2025-10-21T16:21:15.26Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bf/05/8e29121994b8d959ffa0afd28996d452f291b48cfc0875619de0bde2c50c/grpcio-1.76.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:81fd9652b37b36f16138611c7e884eb82e0cec137c40d3ef7c3f9b3ed00f6ed8", size = 5799718, upload-time = "2025-10-21T16:21:17.939Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/75/11d0e66b3cdf998c996489581bdad8900db79ebd83513e45c19548f1cba4/grpcio-1.76.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:04bbe1bfe3a68bbfd4e52402ab7d4eb59d72d02647ae2042204326cf4bbad280", size = 11825627, upload-time = "2025-10-21T16:21:20.466Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/28/50/2f0aa0498bc188048f5d9504dcc5c2c24f2eb1a9337cd0fa09a61a2e75f0/grpcio-1.76.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d388087771c837cdb6515539f43b9d4bf0b0f23593a24054ac16f7a960be16f4", size = 6359167, upload-time = "2025-10-21T16:21:23.122Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/66/e5/bbf0bb97d29ede1d59d6588af40018cfc345b17ce979b7b45424628dc8bb/grpcio-1.76.0-cp312-cp312-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:9f8f757bebaaea112c00dba718fc0d3260052ce714e25804a03f93f5d1c6cc11", size = 7044267, upload-time = "2025-10-21T16:21:25.995Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/86/f6ec2164f743d9609691115ae8ece098c76b894ebe4f7c94a655c6b03e98/grpcio-1.76.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:980a846182ce88c4f2f7e2c22c56aefd515daeb36149d1c897f83cf57999e0b6", size = 6573963, upload-time = "2025-10-21T16:21:28.631Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/bc/8d9d0d8505feccfdf38a766d262c71e73639c165b311c9457208b56d92ae/grpcio-1.76.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f92f88e6c033db65a5ae3d97905c8fea9c725b63e28d5a75cb73b49bda5024d8", size = 7164484, upload-time = "2025-10-21T16:21:30.837Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/e6/5d6c2fc10b95edf6df9b8f19cf10a34263b7fd48493936fffd5085521292/grpcio-1.76.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4baf3cbe2f0be3289eb68ac8ae771156971848bb8aaff60bad42005539431980", size = 8127777, upload-time = "2025-10-21T16:21:33.577Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3f/c8/dce8ff21c86abe025efe304d9e31fdb0deaaa3b502b6a78141080f206da0/grpcio-1.76.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:615ba64c208aaceb5ec83bfdce7728b80bfeb8be97562944836a7a0a9647d882", size = 7594014, upload-time = "2025-10-21T16:21:41.882Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e0/42/ad28191ebf983a5d0ecef90bab66baa5a6b18f2bfdef9d0a63b1973d9f75/grpcio-1.76.0-cp312-cp312-win32.whl", hash = "sha256:45d59a649a82df5718fd9527ce775fd66d1af35e6d31abdcdc906a49c6822958", size = 3984750, upload-time = "2025-10-21T16:21:44.006Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9e/00/7bd478cbb851c04a48baccaa49b75abaa8e4122f7d86da797500cccdd771/grpcio-1.76.0-cp312-cp312-win_amd64.whl", hash = "sha256:c088e7a90b6017307f423efbb9d1ba97a22aa2170876223f9709e9d1de0b5347", size = 4704003, upload-time = "2025-10-21T16:21:46.244Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/ed/71467ab770effc9e8cef5f2e7388beb2be26ed642d567697bb103a790c72/grpcio-1.76.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:26ef06c73eb53267c2b319f43e6634c7556ea37672029241a056629af27c10e2", size = 5807716, upload-time = "2025-10-21T16:21:48.475Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/85/c6ed56f9817fab03fa8a111ca91469941fb514e3e3ce6d793cb8f1e1347b/grpcio-1.76.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:45e0111e73f43f735d70786557dc38141185072d7ff8dc1829d6a77ac1471468", size = 11821522, upload-time = "2025-10-21T16:21:51.142Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ac/31/2b8a235ab40c39cbc141ef647f8a6eb7b0028f023015a4842933bc0d6831/grpcio-1.76.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:83d57312a58dcfe2a3a0f9d1389b299438909a02db60e2f2ea2ae2d8034909d3", size = 6362558, upload-time = "2025-10-21T16:21:54.213Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/64/9784eab483358e08847498ee56faf8ff6ea8e0a4592568d9f68edc97e9e9/grpcio-1.76.0-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:3e2a27c89eb9ac3d81ec8835e12414d73536c6e620355d65102503064a4ed6eb", size = 7049990, upload-time = "2025-10-21T16:21:56.476Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/94/8c12319a6369434e7a184b987e8e9f3b49a114c489b8315f029e24de4837/grpcio-1.76.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61f69297cba3950a524f61c7c8ee12e55c486cb5f7db47ff9dcee33da6f0d3ae", size = 6575387, upload-time = "2025-10-21T16:21:59.051Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/15/0f/f12c32b03f731f4a6242f771f63039df182c8b8e2cf8075b245b409259d4/grpcio-1.76.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6a15c17af8839b6801d554263c546c69c4d7718ad4321e3166175b37eaacca77", size = 7166668, upload-time = "2025-10-21T16:22:02.049Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/2d/3ec9ce0c2b1d92dd59d1c3264aaec9f0f7c817d6e8ac683b97198a36ed5a/grpcio-1.76.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:25a18e9810fbc7e7f03ec2516addc116a957f8cbb8cbc95ccc80faa072743d03", size = 8124928, upload-time = "2025-10-21T16:22:04.984Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/74/fd3317be5672f4856bcdd1a9e7b5e17554692d3db9a3b273879dc02d657d/grpcio-1.76.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:931091142fd8cc14edccc0845a79248bc155425eee9a98b2db2ea4f00a235a42", size = 7589983, upload-time = "2025-10-21T16:22:07.881Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/bb/ca038cf420f405971f19821c8c15bcbc875505f6ffadafe9ffd77871dc4c/grpcio-1.76.0-cp313-cp313-win32.whl", hash = "sha256:5e8571632780e08526f118f74170ad8d50fb0a48c23a746bef2a6ebade3abd6f", size = 3984727, upload-time = "2025-10-21T16:22:10.032Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/41/80/84087dc56437ced7cdd4b13d7875e7439a52a261e3ab4e06488ba6173b0a/grpcio-1.76.0-cp313-cp313-win_amd64.whl", hash = "sha256:f9f7bd5faab55f47231ad8dba7787866b69f5e93bc306e3915606779bbfb4ba8", size = 4702799, upload-time = "2025-10-21T16:22:12.709Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/46/39adac80de49d678e6e073b70204091e76631e03e94928b9ea4ecf0f6e0e/grpcio-1.76.0-cp314-cp314-linux_armv7l.whl", hash = "sha256:ff8a59ea85a1f2191a0ffcc61298c571bc566332f82e5f5be1b83c9d8e668a62", size = 5808417, upload-time = "2025-10-21T16:22:15.02Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9c/f5/a4531f7fb8b4e2a60b94e39d5d924469b7a6988176b3422487be61fe2998/grpcio-1.76.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:06c3d6b076e7b593905d04fdba6a0525711b3466f43b3400266f04ff735de0cd", size = 11828219, upload-time = "2025-10-21T16:22:17.954Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4b/1c/de55d868ed7a8bd6acc6b1d6ddc4aa36d07a9f31d33c912c804adb1b971b/grpcio-1.76.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fd5ef5932f6475c436c4a55e4336ebbe47bd3272be04964a03d316bbf4afbcbc", size = 6367826, upload-time = "2025-10-21T16:22:20.721Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/64/99e44c02b5adb0ad13ab3adc89cb33cb54bfa90c74770f2607eea629b86f/grpcio-1.76.0-cp314-cp314-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b331680e46239e090f5b3cead313cc772f6caa7d0fc8de349337563125361a4a", size = 7049550, upload-time = "2025-10-21T16:22:23.637Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/43/28/40a5be3f9a86949b83e7d6a2ad6011d993cbe9b6bd27bea881f61c7788b6/grpcio-1.76.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2229ae655ec4e8999599469559e97630185fdd53ae1e8997d147b7c9b2b72cba", size = 6575564, upload-time = "2025-10-21T16:22:26.016Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4b/a9/1be18e6055b64467440208a8559afac243c66a8b904213af6f392dc2212f/grpcio-1.76.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:490fa6d203992c47c7b9e4a9d39003a0c2bcc1c9aa3c058730884bbbb0ee9f09", size = 7176236, upload-time = "2025-10-21T16:22:28.362Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/55/dba05d3fcc151ce6e81327541d2cc8394f442f6b350fead67401661bf041/grpcio-1.76.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:479496325ce554792dba6548fae3df31a72cef7bad71ca2e12b0e58f9b336bfc", size = 8125795, upload-time = "2025-10-21T16:22:31.075Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4a/45/122df922d05655f63930cf42c9e3f72ba20aadb26c100ee105cad4ce4257/grpcio-1.76.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1c9b93f79f48b03ada57ea24725d83a30284a012ec27eab2cf7e50a550cbbbcc", size = 7592214, upload-time = "2025-10-21T16:22:33.831Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4a/6e/0b899b7f6b66e5af39e377055fb4a6675c9ee28431df5708139df2e93233/grpcio-1.76.0-cp314-cp314-win32.whl", hash = "sha256:747fa73efa9b8b1488a95d0ba1039c8e2dca0f741612d80415b1e1c560febf4e", size = 4062961, upload-time = "2025-10-21T16:22:36.468Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/41/0b430b01a2eb38ee887f88c1f07644a1df8e289353b78e82b37ef988fb64/grpcio-1.76.0-cp314-cp314-win_amd64.whl", hash = "sha256:922fa70ba549fce362d2e2871ab542082d66e2aaf0c19480ea453905b01f384e", size = 4834462, upload-time = "2025-10-21T16:22:39.772Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grpcio-status"
|
||||
version = "1.76.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "googleapis-common-protos" },
|
||||
{ name = "grpcio" },
|
||||
{ name = "protobuf" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/3f/46/e9f19d5be65e8423f886813a2a9d0056ba94757b0c5007aa59aed1a961fa/grpcio_status-1.76.0.tar.gz", hash = "sha256:25fcbfec74c15d1a1cb5da3fab8ee9672852dc16a5a9eeb5baf7d7a9952943cd", size = 13679, upload-time = "2025-10-21T16:28:52.545Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/8c/cc/27ba60ad5a5f2067963e6a858743500df408eb5855e98be778eaef8c9b02/grpcio_status-1.76.0-py3-none-any.whl", hash = "sha256:380568794055a8efbbd8871162df92012e0228a5f6dffaf57f2a00c534103b18", size = 14425, upload-time = "2025-10-21T16:28:40.853Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/02/858bdae08e2184b6afe0b18bc3113318522c9cf326a5a1698055edd31f88/google_genai-1.57.0-py3-none-any.whl", hash = "sha256:d63c7a89a1f549c4d14032f41a0cdb4b6fe3f565e2eee6b5e0907a0aeceabefd", size = 713323, upload-time = "2026-01-07T20:38:18.051Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -841,35 +742,35 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain"
|
||||
version = "1.1.0"
|
||||
version = "1.2.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "langgraph" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a1/06/be7273c6c15f5a7e64788ed2aa6329dd019170a176977acff7bcde2cdea2/langchain-1.1.0.tar.gz", hash = "sha256:583c892f59873c0329dbe04169fb3234ac794c50780e7c6fb62a61c7b86a981b", size = 528416, upload-time = "2025-11-24T15:31:24.47Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/5f/78/9565319259d92818d96f30d55507ee1072fbf5c008b95a6acecf5e47c4d6/langchain-1.2.3.tar.gz", hash = "sha256:9d6171f9c3c760ca3c7c2cf8518e6f8625380962c488b41e35ebff1f1d611077", size = 548296, upload-time = "2026-01-08T20:26:30.149Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/6f/889c01d22c84934615fa3f2dcf94c2fe76fd0afa7a7d01f9b798059f0ecc/langchain-1.1.0-py3-none-any.whl", hash = "sha256:af080f3a4a779bfa5925de7aacb6dfab83249d4aab9a08f7aa7b9bec3766d8ea", size = 101797, upload-time = "2025-11-24T15:31:23.401Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/de/e5/9b4f58533f8ce3013b1a993289eb11e8607d9c9d9d14699b29c6ac3b4132/langchain-1.2.3-py3-none-any.whl", hash = "sha256:5cdc7c80f672962b030c4b0d16d0d8f26d849c0ada63a4b8653a20d7505512ae", size = 106428, upload-time = "2026-01-08T20:26:29.162Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-anthropic"
|
||||
version = "1.2.0"
|
||||
version = "1.3.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anthropic" },
|
||||
{ name = "langchain-core" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/66/f2/717dcadf0c96960154594409b68bdd5953ab95439e0b65de13cdd5c08785/langchain_anthropic-1.2.0.tar.gz", hash = "sha256:3f3cfad8c519ead2deb21c30dc538b18f4c094704c7874784320cbed7a199453", size = 688803, upload-time = "2025-11-24T14:17:17.424Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0d/b6/ac5ee84e15bf79844c9c791f99a614c7ec7e1a63c2947e55977be01a81b4/langchain_anthropic-1.3.1.tar.gz", hash = "sha256:4f3d7a4a7729ab1aeaf62d32c87d4d227c1b5421668ca9e3734562b383470b07", size = 708940, upload-time = "2026-01-05T21:07:19.345Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ae/f4/f684725bd375208130ff3e9878ff3e671d888eec89a834617f3d7bcc14c9/langchain_anthropic-1.2.0-py3-none-any.whl", hash = "sha256:f489df97833e12ca0360a098eb9d04e410752840416be87ab60b0a3e120a99fe", size = 49512, upload-time = "2025-11-24T14:17:16.048Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/4f/7a5b32764addf4b757545b89899b9d76688176f19e4ee89868e3b8bbfd0f/langchain_anthropic-1.3.1-py3-none-any.whl", hash = "sha256:1fc28cf8037c30597ee6172fc2ff9e345efe8149a8c2a39897b1eebba2948322", size = 46328, upload-time = "2026-01-05T21:07:18.261Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-core"
|
||||
version = "1.1.0"
|
||||
version = "1.2.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "jsonpatch" },
|
||||
@@ -879,25 +780,26 @@ dependencies = [
|
||||
{ name = "pyyaml" },
|
||||
{ name = "tenacity" },
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "uuid-utils" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1e/17/67c1cc2ace919e2b02dd9d783154d7fb3f1495a4ef835d9cd163b7855ac2/langchain_core-1.1.0.tar.gz", hash = "sha256:2b76a82d427922c8bc51c08404af4fc2a29e9f161dfe2297cb05091e810201e7", size = 781995, upload-time = "2025-11-21T21:01:26.958Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b9/ce/ba5ed5ea6df22965b2893c2ed28ebb456204962723d408904c4acfa5e942/langchain_core-1.2.6.tar.gz", hash = "sha256:b4e7841dd7f8690375aa07c54739178dc2c635147d475e0c2955bf82a1afa498", size = 833343, upload-time = "2026-01-02T21:35:44.749Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/71/1e/e129fc471a2d2a7b3804480a937b5ab9319cab9f4142624fcb115f925501/langchain_core-1.1.0-py3-none-any.whl", hash = "sha256:2c9f27dadc6d21ed4aa46506a37a56e6a7e2d2f9141922dc5c251ba921822ee6", size = 473752, upload-time = "2025-11-21T21:01:25.841Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6f/40/0655892c245d8fbe6bca6d673ab5927e5c3ab7be143de40b52289a0663bc/langchain_core-1.2.6-py3-none-any.whl", hash = "sha256:aa6ed954b4b1f4504937fe75fdf674317027e9a91ba7a97558b0de3dc8004e34", size = 489096, upload-time = "2026-01-02T21:35:43.391Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-google-genai"
|
||||
version = "3.2.0"
|
||||
version = "4.1.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "filetype" },
|
||||
{ name = "google-ai-generativelanguage" },
|
||||
{ name = "google-genai" },
|
||||
{ name = "langchain-core" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ff/27/f3c8f47b7c194c42a7ea38e5b91b412c4bd45f97e702a96edad659312437/langchain_google_genai-3.2.0.tar.gz", hash = "sha256:1fa620ea9c655a37537e95438857c423e1a3599b5a665b8dd87064c76ee95b72", size = 242146, upload-time = "2025-11-24T14:33:11.205Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ba/85/078d5aba488a82a53b8372ac1037dee4f64b020bac69e6a07e37a5059059/langchain_google_genai-4.1.3.tar.gz", hash = "sha256:28966c8fe58c9a401fdc37aeeeb0eb51744210803838ce050f022fc53d2f994e", size = 277024, upload-time = "2026-01-05T23:29:34.362Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/83/9d/c79a367e3379cf6b7d0cc43d558a411a5097d55291f2ce2f573420adb523/langchain_google_genai-3.2.0-py3-none-any.whl", hash = "sha256:689fc159d4623a184678e24771f6d52373e983a8fc8d342e44352aaf28e9445d", size = 57604, upload-time = "2025-11-24T14:33:10.112Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/aa/ca61dc2d202a23d7605a5c0ea24bd86a39a5c23c932a166b87c7797747c5/langchain_google_genai-4.1.3-py3-none-any.whl", hash = "sha256:5d710e2dcf449d49704bdbcd31729be90b386fa008395f9552a5c090241de1a5", size = 66262, upload-time = "2026-01-05T23:29:32.924Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1259,33 +1161,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proto-plus"
|
||||
version = "1.27.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "protobuf" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/01/89/9cbe2f4bba860e149108b683bc2efec21f14d5f7ed6e25562ad86acbc373/proto_plus-1.27.0.tar.gz", hash = "sha256:873af56dd0d7e91836aee871e5799e1c6f1bda86ac9a983e0bb9f0c266a568c4", size = 56158, upload-time = "2025-12-16T13:46:25.729Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cd/24/3b7a0818484df9c28172857af32c2397b6d8fcd99d9468bd4684f98ebf0a/proto_plus-1.27.0-py3-none-any.whl", hash = "sha256:1baa7f81cf0f8acb8bc1f6d085008ba4171eaf669629d1b6d1673b21ed1c0a82", size = 50205, upload-time = "2025-12-16T13:46:24.76Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "protobuf"
|
||||
version = "6.33.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/34/44/e49ecff446afeec9d1a66d6bbf9adc21e3c7cea7803a920ca3773379d4f6/protobuf-6.33.2.tar.gz", hash = "sha256:56dc370c91fbb8ac85bc13582c9e373569668a290aa2e66a590c2a0d35ddb9e4", size = 444296, upload-time = "2025-12-06T00:17:53.311Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/bc/91/1e3a34881a88697a7354ffd177e8746e97a722e5e8db101544b47e84afb1/protobuf-6.33.2-cp310-abi3-win32.whl", hash = "sha256:87eb388bd2d0f78febd8f4c8779c79247b26a5befad525008e49a6955787ff3d", size = 425603, upload-time = "2025-12-06T00:17:41.114Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/64/20/4d50191997e917ae13ad0a235c8b42d8c1ab9c3e6fd455ca16d416944355/protobuf-6.33.2-cp310-abi3-win_amd64.whl", hash = "sha256:fc2a0e8b05b180e5fc0dd1559fe8ebdae21a27e81ac77728fb6c42b12c7419b4", size = 436930, upload-time = "2025-12-06T00:17:43.278Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/ca/7e485da88ba45c920fb3f50ae78de29ab925d9e54ef0de678306abfbb497/protobuf-6.33.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d9b19771ca75935b3a4422957bc518b0cecb978b31d1dd12037b088f6bcc0e43", size = 427621, upload-time = "2025-12-06T00:17:44.445Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7d/4f/f743761e41d3b2b2566748eb76bbff2b43e14d5fcab694f494a16458b05f/protobuf-6.33.2-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:b5d3b5625192214066d99b2b605f5783483575656784de223f00a8d00754fc0e", size = 324460, upload-time = "2025-12-06T00:17:45.678Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b1/fa/26468d00a92824020f6f2090d827078c09c9c587e34cbfd2d0c7911221f8/protobuf-6.33.2-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8cd7640aee0b7828b6d03ae518b5b4806fdfc1afe8de82f79c3454f8aef29872", size = 339168, upload-time = "2025-12-06T00:17:46.813Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/56/13/333b8f421738f149d4fe5e49553bc2a2ab75235486259f689b4b91f96cec/protobuf-6.33.2-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:1f8017c48c07ec5859106533b682260ba3d7c5567b1ca1f24297ce03384d1b4f", size = 323270, upload-time = "2025-12-06T00:17:48.253Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0e/15/4f02896cc3df04fc465010a4c6a0cd89810f54617a32a70ef531ed75d61c/protobuf-6.33.2-py3-none-any.whl", hash = "sha256:7636aad9bb01768870266de5dc009de2d1b936771b38a793f73cbbf279c91c5c", size = 170501, upload-time = "2025-12-06T00:17:52.211Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pyasn1"
|
||||
version = "0.6.1"
|
||||
@@ -1955,6 +1830,35 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uuid-utils"
|
||||
version = "0.12.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0b/0e/512fb221e4970c2f75ca9dae412d320b7d9ddc9f2b15e04ea8e44710396c/uuid_utils-0.12.0.tar.gz", hash = "sha256:252bd3d311b5d6b7f5dfce7a5857e27bb4458f222586bb439463231e5a9cbd64", size = 20889, upload-time = "2025-12-01T17:29:55.494Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/43/de5cd49a57b6293b911b6a9a62fc03e55db9f964da7d5882d9edbee1e9d2/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:3b9b30707659292f207b98f294b0e081f6d77e1fbc760ba5b41331a39045f514", size = 603197, upload-time = "2025-12-01T17:29:30.104Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/fa/5fd1d8c9234e44f0c223910808cde0de43bb69f7df1349e49b1afa7f2baa/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:add3d820c7ec14ed37317375bea30249699c5d08ff4ae4dbee9fc9bce3bfbf65", size = 305168, upload-time = "2025-12-01T17:29:31.384Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/c6/8633ac9942bf9dc97a897b5154e5dcffa58816ec4dd780b3b12b559ff05c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8fce83ecb3b16af29c7809669056c4b6e7cc912cab8c6d07361645de12dd79", size = 340580, upload-time = "2025-12-01T17:29:32.362Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/88/8a61307b04b4da1c576373003e6d857a04dade52ab035151d62cb84d5cb5/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec921769afcb905035d785582b0791d02304a7850fbd6ce924c1a8976380dfc6", size = 346771, upload-time = "2025-12-01T17:29:33.708Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/fb/aab2dcf94b991e62aa167457c7825b9b01055b884b888af926562864398c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f3b060330f5899a92d5c723547dc6a95adef42433e9748f14c66859a7396664", size = 474781, upload-time = "2025-12-01T17:29:35.237Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/7a/dbd5e49c91d6c86dba57158bbfa0e559e1ddf377bb46dcfd58aea4f0d567/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:908dfef7f0bfcf98d406e5dc570c25d2f2473e49b376de41792b6e96c1d5d291", size = 343685, upload-time = "2025-12-01T17:29:36.677Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/19/8c4b1d9f450159733b8be421a4e1fb03533709b80ed3546800102d085572/uuid_utils-0.12.0-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4c6a24148926bd0ca63e8a2dabf4cc9dc329a62325b3ad6578ecd60fbf926506", size = 366482, upload-time = "2025-12-01T17:29:37.979Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/82/43/c79a6e45687647f80a159c8ba34346f287b065452cc419d07d2212d38420/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:64a91e632669f059ef605f1771d28490b1d310c26198e46f754e8846dddf12f4", size = 523132, upload-time = "2025-12-01T17:29:39.293Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/a2/b2d75a621260a40c438aa88593827dfea596d18316520a99e839f7a5fb9d/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:93c082212470bb4603ca3975916c205a9d7ef1443c0acde8fbd1e0f5b36673c7", size = 614218, upload-time = "2025-12-01T17:29:40.315Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/13/6b/ba071101626edd5a6dabf8525c9a1537ff3d885dbc210540574a03901fef/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:431b1fb7283ba974811b22abd365f2726f8f821ab33f0f715be389640e18d039", size = 546241, upload-time = "2025-12-01T17:29:41.656Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/12/9a942b81c0923268e6d85bf98d8f0a61fcbcd5e432fef94fdf4ce2ef8748/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2ffd7838c40149100299fa37cbd8bab5ee382372e8e65a148002a37d380df7c8", size = 511842, upload-time = "2025-12-01T17:29:43.107Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a9/a7/c326f5163dd48b79368b87d8a05f5da4668dd228a3f5ca9d79d5fee2fc40/uuid_utils-0.12.0-cp39-abi3-win32.whl", hash = "sha256:487f17c0fee6cbc1d8b90fe811874174a9b1b5683bf2251549e302906a50fed3", size = 179088, upload-time = "2025-12-01T17:29:44.492Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/92/41c8734dd97213ee1d5ae435cf4499705dc4f2751e3b957fd12376f61784/uuid_utils-0.12.0-cp39-abi3-win_amd64.whl", hash = "sha256:9598e7c9da40357ae8fffc5d6938b1a7017f09a1acbcc95e14af8c65d48c655a", size = 183003, upload-time = "2025-12-01T17:29:45.47Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/f9/52ab0359618987331a1f739af837d26168a4b16281c9c3ab46519940c628/uuid_utils-0.12.0-cp39-abi3-win_arm64.whl", hash = "sha256:c9bea7c5b2aa6f57937ebebeee4d4ef2baad10f86f1b97b58a3f6f34c14b4e84", size = 182975, upload-time = "2025-12-01T17:29:46.444Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ef/f7/6c55b7722cede3b424df02ed5cddb25c19543abda2f95fa4cfc34a892ae5/uuid_utils-0.12.0-pp311-pypy311_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e2209d361f2996966ab7114f49919eb6aaeabc6041672abbbbf4fdbb8ec1acc0", size = 593065, upload-time = "2025-12-01T17:29:47.507Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b8/40/ce5fe8e9137dbd5570e0016c2584fca43ad81b11a1cef809a1a1b4952ab7/uuid_utils-0.12.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d9636bcdbd6cfcad2b549c352b669412d0d1eb09be72044a2f13e498974863cd", size = 300047, upload-time = "2025-12-01T17:29:48.596Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/9b/31c5d0736d7b118f302c50214e581f40e904305d8872eb0f0c921d50e138/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cd8543a3419251fb78e703ce3b15fdfafe1b7c542cf40caf0775e01db7e7674", size = 335165, upload-time = "2025-12-01T17:29:49.755Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/5c/d80b4d08691c9d7446d0ad58fd41503081a662cfd2c7640faf68c64d8098/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e98db2d8977c052cb307ae1cb5cc37a21715e8d415dbc65863b039397495a013", size = 341437, upload-time = "2025-12-01T17:29:51.112Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/b3/9dccdc6f3c22f6ef5bd381ae559173f8a1ae185ae89ed1f39f499d9d8b02/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8f2bdf5e4ffeb259ef6d15edae92aed60a1d6f07cbfab465d836f6b12b48da8", size = 469123, upload-time = "2025-12-01T17:29:52.389Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/90/6c35ef65fbc49f8189729839b793a4a74a7dd8c5aa5eb56caa93f8c97732/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c3ec53c0cb15e1835870c139317cc5ec06e35aa22843e3ed7d9c74f23f23898", size = 335892, upload-time = "2025-12-01T17:29:53.44Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6b/c7/e3f3ce05c5af2bf86a0938d22165affe635f4dcbfd5687b1dacc042d3e0e/uuid_utils-0.12.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:84e5c0eba209356f7f389946a3a47b2cc2effd711b3fc7c7f155ad9f7d45e8a3", size = 360693, upload-time = "2025-12-01T17:29:54.558Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wcmatch"
|
||||
version = "10.1"
|
||||
@@ -1967,6 +1871,48 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/d8/0d1d2e9d3fabcf5d6840362adcf05f8cf3cd06a73358140c3a97189238ae/wcmatch-10.1-py3-none-any.whl", hash = "sha256:5848ace7dbb0476e5e55ab63c6bbd529745089343427caa5537f230cc01beb8a", size = 39854, upload-time = "2025-06-22T19:14:00.978Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "websockets"
|
||||
version = "15.0.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423, upload-time = "2025-03-05T20:01:56.276Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082, upload-time = "2025-03-05T20:01:57.563Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330, upload-time = "2025-03-05T20:01:59.063Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878, upload-time = "2025-03-05T20:02:00.305Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883, upload-time = "2025-03-05T20:02:03.148Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252, upload-time = "2025-03-05T20:02:05.29Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521, upload-time = "2025-03-05T20:02:07.458Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958, upload-time = "2025-03-05T20:02:09.842Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918, upload-time = "2025-03-05T20:02:11.968Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", size = 176388, upload-time = "2025-03-05T20:02:13.32Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828, upload-time = "2025-03-05T20:02:14.585Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152, upload-time = "2025-03-05T20:02:22.286Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "xxhash"
|
||||
version = "3.6.0"
|
||||
|
||||
@@ -15,17 +15,17 @@ from harbor.environments.base import BaseEnvironment
|
||||
|
||||
|
||||
class HarborSandbox(SandboxBackendProtocol):
|
||||
"""Python3가 사용 가능하다고 가정하지 않는 샌드박스 구현체입니다."""
|
||||
"""A sandbox implementation without assuming that python3 is available."""
|
||||
|
||||
def __init__(self, environment: BaseEnvironment) -> None:
|
||||
"""제공된 환경으로 HarborSandbox를 초기화합니다."""
|
||||
"""Initialize HarborSandbox with the given environment."""
|
||||
self.environment = environment
|
||||
|
||||
async def aexecute(
|
||||
self,
|
||||
command: str,
|
||||
) -> ExecuteResponse:
|
||||
"""작업 환경에서 bash 명령을 실행합니다."""
|
||||
"""Execute a bash command in the task environment."""
|
||||
result = await self.environment.exec(command)
|
||||
|
||||
# These errors appear in harbor environments when running bash commands
|
||||
@@ -71,12 +71,12 @@ class HarborSandbox(SandboxBackendProtocol):
|
||||
self,
|
||||
command: str,
|
||||
) -> ExecuteResponse:
|
||||
"""작업 환경에서 bash 명령을 실행합니다."""
|
||||
raise NotImplementedError("이 백엔드는 비동기 실행만 지원합니다")
|
||||
"""Execute a bash command in the task environment."""
|
||||
raise NotImplementedError("This backend only supports async execution")
|
||||
|
||||
@property
|
||||
def id(self) -> str:
|
||||
"""샌드박스 백엔드의 고유 식별자입니다."""
|
||||
"""Unique identifier for the sandbox backend."""
|
||||
return self.environment.session_id
|
||||
|
||||
async def aread(
|
||||
@@ -85,18 +85,18 @@ class HarborSandbox(SandboxBackendProtocol):
|
||||
offset: int = 0,
|
||||
limit: int = 2000,
|
||||
) -> str:
|
||||
"""셸 명령을 사용하여 줄 번호가 있는 파일 내용을 읽습니다."""
|
||||
"""Read file content with line numbers using shell commands."""
|
||||
# Escape file path for shell
|
||||
safe_path = shlex.quote(file_path)
|
||||
|
||||
# Check if file exists and handle empty files
|
||||
cmd = f"""
|
||||
if [ ! -f {safe_path} ]; then
|
||||
echo "오류: 파일을 찾을 수 없습니다"
|
||||
echo "Error: File not found"
|
||||
exit 1
|
||||
fi
|
||||
if [ ! -s {safe_path} ]; then
|
||||
echo "시스템 알림: 파일이 존재하지만 내용이 비어 있습니다"
|
||||
echo "System reminder: File exists but has empty contents"
|
||||
exit 0
|
||||
fi
|
||||
# Use awk to add line numbers and handle offset/limit
|
||||
@@ -109,8 +109,8 @@ awk -v offset={offset} -v limit={limit} '
|
||||
"""
|
||||
result = await self.aexecute(cmd)
|
||||
|
||||
if result.exit_code != 0 or "오류: 파일을 찾을 수 없습니다" in result.output:
|
||||
return f"오류: 파일 '{file_path}'을(를) 찾을 수 없습니다"
|
||||
if result.exit_code != 0 or "Error: File not found" in result.output:
|
||||
return f"Error: File '{file_path}' not found"
|
||||
|
||||
return result.output.rstrip()
|
||||
|
||||
@@ -120,22 +120,22 @@ awk -v offset={offset} -v limit={limit} '
|
||||
offset: int = 0,
|
||||
limit: int = 2000,
|
||||
) -> str:
|
||||
"""셸 명령을 사용하여 줄 번호가 있는 파일 내용을 읽습니다."""
|
||||
raise NotImplementedError("aread를 사용하십시오")
|
||||
"""Read file content with line numbers using shell commands."""
|
||||
raise NotImplementedError("Use aread instead")
|
||||
|
||||
async def awrite(
|
||||
self,
|
||||
file_path: str,
|
||||
content: str,
|
||||
) -> WriteResult:
|
||||
"""셸 명령을 사용하여 새 파일을 생성합니다."""
|
||||
"""Create a new file using shell commands."""
|
||||
# Encode content as base64 to avoid escaping issues
|
||||
content_b64 = base64.b64encode(content.encode("utf-8")).decode("ascii")
|
||||
safe_path = shlex.quote(file_path)
|
||||
|
||||
cmd = f"""
|
||||
if [ -e {safe_path} ]; then
|
||||
echo "오류: 파일 '{file_path}'이(가) 이미 존재합니다" >&2
|
||||
echo "Error: File '{file_path}' already exists" >&2
|
||||
exit 1
|
||||
fi
|
||||
parent_dir=$(dirname {safe_path})
|
||||
@@ -145,7 +145,7 @@ echo '{content_b64}' | base64 -d > {safe_path}
|
||||
result = await self.aexecute(cmd)
|
||||
|
||||
if result.exit_code != 0 or "Error:" in result.output:
|
||||
error_msg = result.output.strip() or f"파일 '{file_path}' 쓰기 실패"
|
||||
error_msg = result.output.strip() or f"Failed to write file '{file_path}'"
|
||||
return WriteResult(error=error_msg)
|
||||
|
||||
return WriteResult(path=file_path, files_update=None)
|
||||
@@ -155,8 +155,8 @@ echo '{content_b64}' | base64 -d > {safe_path}
|
||||
file_path: str,
|
||||
content: str,
|
||||
) -> WriteResult:
|
||||
"""셸 명령을 사용하여 새 파일을 생성합니다."""
|
||||
raise NotImplementedError("awrite를 사용하십시오")
|
||||
"""Create a new file using shell commands."""
|
||||
raise NotImplementedError("Use awrite instead")
|
||||
|
||||
async def aedit(
|
||||
self,
|
||||
@@ -165,7 +165,7 @@ echo '{content_b64}' | base64 -d > {safe_path}
|
||||
new_string: str,
|
||||
replace_all: bool = False,
|
||||
) -> EditResult:
|
||||
"""셸 명령을 사용하여 문자열 조회를 대체하여 파일을 편집합니다."""
|
||||
"""Edit a file by replacing string occurrences using shell commands."""
|
||||
# Encode strings as base64 to avoid escaping issues
|
||||
old_b64 = base64.b64encode(old_string.encode("utf-8")).decode("ascii")
|
||||
new_b64 = base64.b64encode(new_string.encode("utf-8")).decode("ascii")
|
||||
@@ -205,15 +205,15 @@ echo "$count"
|
||||
output = result.output.strip()
|
||||
|
||||
if exit_code == 1:
|
||||
return EditResult(error=f"오류: 파일에서 문자열을 찾을 수 없습니다: '{old_string}'")
|
||||
return EditResult(error=f"Error: String not found in file: '{old_string}'")
|
||||
if exit_code == 2:
|
||||
return EditResult(
|
||||
error=f"오류: 문자열 '{old_string}'이(가) 여러 번 나옵니다. 모든 항목을 바꾸려면 replace_all=True를 사용하십시오."
|
||||
error=f"Error: String '{old_string}' appears multiple times. Use replace_all=True to replace all occurrences."
|
||||
)
|
||||
if exit_code == 3:
|
||||
return EditResult(error=f"오류: 파일 '{file_path}'을(를) 찾을 수 없습니다")
|
||||
return EditResult(error=f"Error: File '{file_path}' not found")
|
||||
if exit_code != 0:
|
||||
return EditResult(error=f"파일 편집 오류: {output}")
|
||||
return EditResult(error=f"Error editing file: {output}")
|
||||
|
||||
try:
|
||||
count = int(output.split("\n")[0])
|
||||
@@ -229,11 +229,11 @@ echo "$count"
|
||||
new_string: str,
|
||||
replace_all: bool = False,
|
||||
) -> EditResult:
|
||||
"""셸 명령을 사용하여 문자열 조회를 대체하여 파일을 편집합니다."""
|
||||
raise NotImplementedError("aedit를 사용하십시오")
|
||||
"""Edit a file by replacing string occurrences using shell commands."""
|
||||
raise NotImplementedError("Use aedit instead")
|
||||
|
||||
async def als_info(self, path: str) -> list[FileInfo]:
|
||||
"""셸 명령을 사용하여 디렉터리 내용과 메타데이터를 나열합니다."""
|
||||
"""List directory contents with metadata using shell commands."""
|
||||
safe_path = shlex.quote(path)
|
||||
|
||||
cmd = f"""
|
||||
@@ -267,8 +267,8 @@ done
|
||||
return file_infos
|
||||
|
||||
def ls_info(self, path: str) -> list[FileInfo]:
|
||||
"""셸 명령을 사용하여 디렉터리 내용과 메타데이터를 나열합니다."""
|
||||
raise NotImplementedError("als_info를 사용하십시오")
|
||||
"""List directory contents with metadata using shell commands."""
|
||||
raise NotImplementedError("Use als_info instead")
|
||||
|
||||
async def agrep_raw(
|
||||
self,
|
||||
@@ -276,7 +276,7 @@ done
|
||||
path: str | None = None,
|
||||
glob: str | None = None,
|
||||
) -> list[GrepMatch] | str:
|
||||
"""grep을 사용하여 파일에서 패턴을 검색합니다."""
|
||||
"""Search for pattern in files using grep."""
|
||||
search_path = shlex.quote(path or ".")
|
||||
|
||||
# Build grep command
|
||||
@@ -304,11 +304,13 @@ done
|
||||
parts = line.split(":", 2)
|
||||
if len(parts) >= 3:
|
||||
try:
|
||||
matches.append({
|
||||
"path": parts[0],
|
||||
"line": int(parts[1]),
|
||||
"text": parts[2],
|
||||
})
|
||||
matches.append(
|
||||
{
|
||||
"path": parts[0],
|
||||
"line": int(parts[1]),
|
||||
"text": parts[2],
|
||||
}
|
||||
)
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
@@ -320,13 +322,14 @@ done
|
||||
path: str | None = None,
|
||||
glob: str | None = None,
|
||||
) -> list[GrepMatch] | str:
|
||||
"""grep을 사용하여 파일에서 패턴을 검색합니다."""
|
||||
raise NotImplementedError("agrep_raw를 사용하십시오")
|
||||
"""Search for pattern in files using grep."""
|
||||
raise NotImplementedError("Use agrep_raw instead")
|
||||
|
||||
async def aglob_info(self, pattern: str, path: str = "/") -> list[FileInfo]:
|
||||
"""셸 명령을 사용하여 glob 패턴과 일치하는 파일을 찾습니다.
|
||||
"""Find files matching glob pattern using shell commands.
|
||||
|
||||
이 구현은 현재 모든 glob 패턴을 지원하지는 않습니다.
|
||||
Please note that this implementation does not currently support all glob
|
||||
patterns.
|
||||
"""
|
||||
safe_path = shlex.quote(path)
|
||||
safe_pattern = shlex.quote(pattern)
|
||||
@@ -360,13 +363,15 @@ done
|
||||
continue
|
||||
parts = line.split("|")
|
||||
if len(parts) == 2:
|
||||
file_infos.append({
|
||||
"path": parts[0],
|
||||
"is_dir": parts[1] == "true",
|
||||
})
|
||||
file_infos.append(
|
||||
{
|
||||
"path": parts[0],
|
||||
"is_dir": parts[1] == "true",
|
||||
}
|
||||
)
|
||||
|
||||
return file_infos
|
||||
|
||||
def glob_info(self, pattern: str, path: str = "/") -> list[FileInfo]:
|
||||
"""셸 명령을 사용하여 glob 패턴과 일치하는 파일을 찾습니다."""
|
||||
raise NotImplementedError("aglob_info를 사용하십시오")
|
||||
"""Find files matching glob pattern using shell commands."""
|
||||
raise NotImplementedError("Use aglob_info instead")
|
||||
|
||||
@@ -68,15 +68,15 @@ class DeepAgentsWrapper(BaseAgent):
|
||||
*args,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
"""DeepAgentsWrapper를 초기화합니다.
|
||||
"""Initialize DeepAgentsWrapper.
|
||||
|
||||
Args:
|
||||
logs_dir: 로그를 저장할 디렉터리
|
||||
model_name: 사용할 LLM 모델의 이름
|
||||
temperature: 모델의 온도 설정
|
||||
verbose: 상세 출력 활성화
|
||||
use_cli_agent: True인 경우 deepagents-cli의 create_cli_agent를 사용(기본값).
|
||||
False인 경우 SDK의 create_deep_agent를 사용.
|
||||
logs_dir: Directory for storing logs
|
||||
model_name: Name of the LLM model to use
|
||||
temperature: Temperature setting for the model
|
||||
verbose: Enable verbose output
|
||||
use_cli_agent: If True, use create_cli_agent from deepagents-cli (default).
|
||||
If False, use create_deep_agent from SDK.
|
||||
"""
|
||||
super().__init__(logs_dir, model_name, *args, **kwargs)
|
||||
|
||||
@@ -166,7 +166,9 @@ class DeepAgentsWrapper(BaseAgent):
|
||||
"""
|
||||
configuration = json.loads(environment.trial_paths.config_path.read_text())
|
||||
if not isinstance(configuration, dict):
|
||||
raise AssertionError(f"Unexpected configuration format. Expected a dict got {type(configuration)}.")
|
||||
raise AssertionError(
|
||||
f"Unexpected configuration format. Expected a dict got {type(configuration)}."
|
||||
)
|
||||
|
||||
backend = HarborSandbox(environment)
|
||||
|
||||
@@ -192,7 +194,9 @@ class DeepAgentsWrapper(BaseAgent):
|
||||
# Get formatted system prompt with directory context
|
||||
system_prompt = await self._get_formatted_system_prompt(backend)
|
||||
|
||||
deep_agent = create_deep_agent(model=self._model, backend=backend, system_prompt=system_prompt)
|
||||
deep_agent = create_deep_agent(
|
||||
model=self._model, backend=backend, system_prompt=system_prompt
|
||||
)
|
||||
|
||||
# Build metadata with experiment tracking info
|
||||
metadata = {
|
||||
@@ -252,7 +256,9 @@ class DeepAgentsWrapper(BaseAgent):
|
||||
|
||||
self._save_trajectory(environment, instruction, result)
|
||||
|
||||
def _save_trajectory(self, environment: BaseEnvironment, instruction: str, result: dict) -> None:
|
||||
def _save_trajectory(
|
||||
self, environment: BaseEnvironment, instruction: str, result: dict
|
||||
) -> None:
|
||||
"""Save current trajectory to logs directory."""
|
||||
# Track token usage and cost for this run
|
||||
total_prompt_tokens = 0
|
||||
@@ -334,7 +340,9 @@ class DeepAgentsWrapper(BaseAgent):
|
||||
elif isinstance(msg, HumanMessage):
|
||||
pass
|
||||
else:
|
||||
raise NotImplementedError(f"Message type {type(msg)} not supported for step conversion")
|
||||
raise NotImplementedError(
|
||||
f"Message type {type(msg)} not supported for step conversion"
|
||||
)
|
||||
|
||||
# Add any remaining pending step
|
||||
if pending_step is not None:
|
||||
|
||||
@@ -1,21 +1,21 @@
|
||||
"""Harbor DeepAgents를 위한 LangSmith 통합."""
|
||||
"""LangSmith integration for Harbor DeepAgents."""
|
||||
|
||||
import hashlib
|
||||
import uuid
|
||||
|
||||
|
||||
def create_example_id_from_instruction(instruction: str, seed: int = 42) -> str:
|
||||
"""지침(instruction) 문자열에서 결정론적(deterministic) UUID를 생성합니다.
|
||||
"""Create a deterministic UUID from an instruction string.
|
||||
|
||||
지침을 정규화(앞뒤 공백 제거)하고 SHA-256 해시를 생성한 다음,
|
||||
LangSmith 호환성을 위해 UUID로 변환합니다.
|
||||
Normalizes the instruction by stripping whitespace and creating a
|
||||
SHA-256 hash, then converting to a UUID for LangSmith compatibility.
|
||||
|
||||
Args:
|
||||
instruction: 해시할 작업 지침 문자열
|
||||
seed: 기존 예제와의 충돌을 피하기 위한 정수 시드
|
||||
instruction: The task instruction string to hash
|
||||
seed: Integer seed to avoid collisions with existing examples
|
||||
|
||||
Returns:
|
||||
정규화된 지침의 해시에서 생성된 UUID 문자열
|
||||
A UUID string generated from the hash of the normalized instruction
|
||||
"""
|
||||
# Normalize the instruction: strip leading/trailing whitespace
|
||||
normalized = instruction.strip()
|
||||
|
||||
368
deepagents_sourcecode/libs/harbor/uv.lock
generated
368
deepagents_sourcecode/libs/harbor/uv.lock
generated
@@ -1,5 +1,5 @@
|
||||
version = 1
|
||||
revision = 2
|
||||
revision = 3
|
||||
requires-python = ">=3.12"
|
||||
|
||||
[manifest]
|
||||
@@ -136,6 +136,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aiosqlite"
|
||||
version = "0.22.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/4e/8a/64761f4005f17809769d23e518d915db74e6310474e733e3593cfc854ef1/aiosqlite-0.22.1.tar.gz", hash = "sha256:043e0bd78d32888c0a9ca90fc788b38796843360c855a7262a532813133a0650", size = 14821, upload-time = "2025-12-23T19:25:43.997Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/00/b7/e3bf5133d697a08128598c8d0abc5e16377b51465a33756de24fa7dee953/aiosqlite-0.22.1-py3-none-any.whl", hash = "sha256:21c002eb13823fad740196c5a2e9d8e62f6243bd9e7e4a1f87fb5e44ecb4fceb", size = 17405, upload-time = "2025-12-23T19:25:42.139Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "annotated-types"
|
||||
version = "0.7.0"
|
||||
@@ -518,20 +527,22 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "deepagents"
|
||||
version = "0.2.8"
|
||||
version = "0.3.5"
|
||||
source = { directory = "../deepagents" }
|
||||
dependencies = [
|
||||
{ name = "langchain" },
|
||||
{ name = "langchain-anthropic" },
|
||||
{ name = "langchain-core" },
|
||||
{ name = "langchain-google-genai" },
|
||||
{ name = "wcmatch" },
|
||||
]
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "langchain", specifier = ">=1.1.0,<2.0.0" },
|
||||
{ name = "langchain-anthropic", specifier = ">=1.2.0,<2.0.0" },
|
||||
{ name = "langchain-core", specifier = ">=1.1.0,<2.0.0" },
|
||||
{ name = "langchain", specifier = ">=1.2.3,<2.0.0" },
|
||||
{ name = "langchain-anthropic", specifier = ">=1.3.1,<2.0.0" },
|
||||
{ name = "langchain-core", specifier = ">=1.2.6,<2.0.0" },
|
||||
{ name = "langchain-google-genai", specifier = ">=4.1.3,<5.0.0" },
|
||||
{ name = "wcmatch" },
|
||||
]
|
||||
|
||||
@@ -552,37 +563,49 @@ test = [
|
||||
|
||||
[[package]]
|
||||
name = "deepagents-cli"
|
||||
version = "0.0.10"
|
||||
version = "0.0.12"
|
||||
source = { directory = "../deepagents-cli" }
|
||||
dependencies = [
|
||||
{ name = "aiosqlite" },
|
||||
{ name = "daytona" },
|
||||
{ name = "deepagents" },
|
||||
{ name = "langchain" },
|
||||
{ name = "langchain-openai" },
|
||||
{ name = "langgraph-checkpoint-sqlite" },
|
||||
{ name = "markdownify" },
|
||||
{ name = "modal" },
|
||||
{ name = "pillow" },
|
||||
{ name = "prompt-toolkit" },
|
||||
{ name = "python-dotenv" },
|
||||
{ name = "pyyaml" },
|
||||
{ name = "requests" },
|
||||
{ name = "rich" },
|
||||
{ name = "runloop-api-client" },
|
||||
{ name = "tavily-python" },
|
||||
{ name = "textual" },
|
||||
{ name = "textual-autocomplete" },
|
||||
]
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "aiosqlite", specifier = ">=0.19.0" },
|
||||
{ name = "daytona", specifier = ">=0.113.0" },
|
||||
{ name = "deepagents", directory = "../deepagents" },
|
||||
{ name = "langchain", specifier = ">=1.0.7" },
|
||||
{ name = "langchain-openai", specifier = ">=0.1.0" },
|
||||
{ name = "langchain", specifier = ">=1.2.3,<2.0.0" },
|
||||
{ name = "langchain-openai", specifier = ">=1.1.7,<2.0.0" },
|
||||
{ name = "langgraph-checkpoint-sqlite", specifier = ">=2.0.0,<3.0.0" },
|
||||
{ name = "markdownify", specifier = ">=0.13.0" },
|
||||
{ name = "modal", specifier = ">=0.65.0" },
|
||||
{ name = "pillow", specifier = ">=10.0.0" },
|
||||
{ name = "prompt-toolkit", specifier = ">=3.0.52" },
|
||||
{ name = "python-dotenv" },
|
||||
{ name = "pyyaml", specifier = ">=6.0" },
|
||||
{ name = "requests" },
|
||||
{ name = "rich", specifier = ">=13.0.0" },
|
||||
{ name = "runloop-api-client", specifier = ">=0.69.0" },
|
||||
{ name = "tavily-python" },
|
||||
{ name = "textual", specifier = ">=1.0.0" },
|
||||
{ name = "textual-autocomplete", specifier = ">=3.0.0" },
|
||||
]
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
@@ -799,6 +822,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/76/91/7216b27286936c16f5b4d0c530087e4a54eead683e6b0b73dd0c64844af6/filelock-3.20.0-py3-none-any.whl", hash = "sha256:339b4732ffda5cd79b13f4e2711a31b0365ce445d95d243bb996273d072546a2", size = 16054, upload-time = "2025-10-08T18:03:48.35Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "filetype"
|
||||
version = "1.2.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/bb/29/745f7d30d47fe0f251d3ad3dc2978a23141917661998763bebb6da007eb1/filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb", size = 998020, upload-time = "2022-11-02T17:34:04.141Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970, upload-time = "2022-11-02T17:34:01.425Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "frozenlist"
|
||||
version = "1.8.0"
|
||||
@@ -902,6 +934,45 @@ http = [
|
||||
{ name = "aiohttp" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "google-auth"
|
||||
version = "2.47.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pyasn1-modules" },
|
||||
{ name = "rsa" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/60/3c/ec64b9a275ca22fa1cd3b6e77fefcf837b0732c890aa32d2bd21313d9b33/google_auth-2.47.0.tar.gz", hash = "sha256:833229070a9dfee1a353ae9877dcd2dec069a8281a4e72e72f77d4a70ff945da", size = 323719, upload-time = "2026-01-06T21:55:31.045Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/db/18/79e9008530b79527e0d5f79e7eef08d3b179b7f851cfd3a2f27822fbdfa9/google_auth-2.47.0-py3-none-any.whl", hash = "sha256:c516d68336bfde7cf0da26aab674a36fedcf04b37ac4edd59c597178760c3498", size = 234867, upload-time = "2026-01-06T21:55:28.6Z" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
requests = [
|
||||
{ name = "requests" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "google-genai"
|
||||
version = "1.57.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
{ name = "distro" },
|
||||
{ name = "google-auth", extra = ["requests"] },
|
||||
{ name = "httpx" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "requests" },
|
||||
{ name = "sniffio" },
|
||||
{ name = "tenacity" },
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "websockets" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2b/b4/8251c2d2576224a4b51a8ab6159820f9200b8da28ff555c78ee15607096e/google_genai-1.57.0.tar.gz", hash = "sha256:0ff9c36b8d68abfbdbd13b703ece926de5f3e67955666b36315ecf669b94a826", size = 485648, upload-time = "2026-01-07T20:38:20.271Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/02/858bdae08e2184b6afe0b18bc3113318522c9cf326a5a1698055edd31f88/google_genai-1.57.0-py3-none-any.whl", hash = "sha256:d63c7a89a1f549c4d14032f41a0cdb4b6fe3f565e2eee6b5e0907a0aeceabefd", size = 713323, upload-time = "2026-01-07T20:38:18.051Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grpclib"
|
||||
version = "0.4.8"
|
||||
@@ -1222,35 +1293,35 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain"
|
||||
version = "1.1.0"
|
||||
version = "1.2.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "langgraph" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a1/06/be7273c6c15f5a7e64788ed2aa6329dd019170a176977acff7bcde2cdea2/langchain-1.1.0.tar.gz", hash = "sha256:583c892f59873c0329dbe04169fb3234ac794c50780e7c6fb62a61c7b86a981b", size = 528416, upload-time = "2025-11-24T15:31:24.47Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/5f/78/9565319259d92818d96f30d55507ee1072fbf5c008b95a6acecf5e47c4d6/langchain-1.2.3.tar.gz", hash = "sha256:9d6171f9c3c760ca3c7c2cf8518e6f8625380962c488b41e35ebff1f1d611077", size = 548296, upload-time = "2026-01-08T20:26:30.149Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/6f/889c01d22c84934615fa3f2dcf94c2fe76fd0afa7a7d01f9b798059f0ecc/langchain-1.1.0-py3-none-any.whl", hash = "sha256:af080f3a4a779bfa5925de7aacb6dfab83249d4aab9a08f7aa7b9bec3766d8ea", size = 101797, upload-time = "2025-11-24T15:31:23.401Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/de/e5/9b4f58533f8ce3013b1a993289eb11e8607d9c9d9d14699b29c6ac3b4132/langchain-1.2.3-py3-none-any.whl", hash = "sha256:5cdc7c80f672962b030c4b0d16d0d8f26d849c0ada63a4b8653a20d7505512ae", size = 106428, upload-time = "2026-01-08T20:26:29.162Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-anthropic"
|
||||
version = "1.2.0"
|
||||
version = "1.3.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anthropic" },
|
||||
{ name = "langchain-core" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/66/f2/717dcadf0c96960154594409b68bdd5953ab95439e0b65de13cdd5c08785/langchain_anthropic-1.2.0.tar.gz", hash = "sha256:3f3cfad8c519ead2deb21c30dc538b18f4c094704c7874784320cbed7a199453", size = 688803, upload-time = "2025-11-24T14:17:17.424Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0d/b6/ac5ee84e15bf79844c9c791f99a614c7ec7e1a63c2947e55977be01a81b4/langchain_anthropic-1.3.1.tar.gz", hash = "sha256:4f3d7a4a7729ab1aeaf62d32c87d4d227c1b5421668ca9e3734562b383470b07", size = 708940, upload-time = "2026-01-05T21:07:19.345Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ae/f4/f684725bd375208130ff3e9878ff3e671d888eec89a834617f3d7bcc14c9/langchain_anthropic-1.2.0-py3-none-any.whl", hash = "sha256:f489df97833e12ca0360a098eb9d04e410752840416be87ab60b0a3e120a99fe", size = 49512, upload-time = "2025-11-24T14:17:16.048Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/4f/7a5b32764addf4b757545b89899b9d76688176f19e4ee89868e3b8bbfd0f/langchain_anthropic-1.3.1-py3-none-any.whl", hash = "sha256:1fc28cf8037c30597ee6172fc2ff9e345efe8149a8c2a39897b1eebba2948322", size = 46328, upload-time = "2026-01-05T21:07:18.261Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-core"
|
||||
version = "1.1.0"
|
||||
version = "1.2.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "jsonpatch" },
|
||||
@@ -1260,24 +1331,40 @@ dependencies = [
|
||||
{ name = "pyyaml" },
|
||||
{ name = "tenacity" },
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "uuid-utils" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1e/17/67c1cc2ace919e2b02dd9d783154d7fb3f1495a4ef835d9cd163b7855ac2/langchain_core-1.1.0.tar.gz", hash = "sha256:2b76a82d427922c8bc51c08404af4fc2a29e9f161dfe2297cb05091e810201e7", size = 781995, upload-time = "2025-11-21T21:01:26.958Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b9/ce/ba5ed5ea6df22965b2893c2ed28ebb456204962723d408904c4acfa5e942/langchain_core-1.2.6.tar.gz", hash = "sha256:b4e7841dd7f8690375aa07c54739178dc2c635147d475e0c2955bf82a1afa498", size = 833343, upload-time = "2026-01-02T21:35:44.749Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/71/1e/e129fc471a2d2a7b3804480a937b5ab9319cab9f4142624fcb115f925501/langchain_core-1.1.0-py3-none-any.whl", hash = "sha256:2c9f27dadc6d21ed4aa46506a37a56e6a7e2d2f9141922dc5c251ba921822ee6", size = 473752, upload-time = "2025-11-21T21:01:25.841Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6f/40/0655892c245d8fbe6bca6d673ab5927e5c3ab7be143de40b52289a0663bc/langchain_core-1.2.6-py3-none-any.whl", hash = "sha256:aa6ed954b4b1f4504937fe75fdf674317027e9a91ba7a97558b0de3dc8004e34", size = 489096, upload-time = "2026-01-02T21:35:43.391Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-google-genai"
|
||||
version = "4.1.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "filetype" },
|
||||
{ name = "google-genai" },
|
||||
{ name = "langchain-core" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ba/85/078d5aba488a82a53b8372ac1037dee4f64b020bac69e6a07e37a5059059/langchain_google_genai-4.1.3.tar.gz", hash = "sha256:28966c8fe58c9a401fdc37aeeeb0eb51744210803838ce050f022fc53d2f994e", size = 277024, upload-time = "2026-01-05T23:29:34.362Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/aa/ca61dc2d202a23d7605a5c0ea24bd86a39a5c23c932a166b87c7797747c5/langchain_google_genai-4.1.3-py3-none-any.whl", hash = "sha256:5d710e2dcf449d49704bdbcd31729be90b386fa008395f9552a5c090241de1a5", size = 66262, upload-time = "2026-01-05T23:29:32.924Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-openai"
|
||||
version = "1.0.2"
|
||||
version = "1.1.7"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "openai" },
|
||||
{ name = "tiktoken" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b3/3c/edb7ffca76fdcfd938ce8380bf8ec79a0a8be41ba7fdbf6f9fe1cb5fd1a8/langchain_openai-1.0.2.tar.gz", hash = "sha256:621e8295c52db9a1fc74806a0bd227ea215c132c6c5e421d2982c9ee78468769", size = 1025578, upload-time = "2025-11-03T14:08:32.121Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/38/b7/30bfc4d1b658a9ee524bcce3b0b2ec9c45a11c853a13c4f0c9da9882784b/langchain_openai-1.1.7.tar.gz", hash = "sha256:f5ec31961ed24777548b63a5fe313548bc6e0eb9730d6552b8c6418765254c81", size = 1039134, upload-time = "2026-01-07T19:44:59.728Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/78/9b/7af1d539a051d195c5ecc5990ebd483f208c40f75a8a9532846d16762704/langchain_openai-1.0.2-py3-none-any.whl", hash = "sha256:b3eb9b82752063b46452aa868d8c8bc1604e57631648c3bc325bba58d3aeb143", size = 81934, upload-time = "2025-11-03T14:08:30.655Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/64/a1/50e7596aca775d8c3883eceeaf47489fac26c57c1abe243c00174f715a8a/langchain_openai-1.1.7-py3-none-any.whl", hash = "sha256:34e9cd686aac1a120d6472804422792bf8080a2103b5d21ee450c9e42d053815", size = 84753, upload-time = "2026-01-07T19:44:58.629Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1299,15 +1386,29 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langgraph-checkpoint"
|
||||
version = "3.0.1"
|
||||
version = "2.1.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "ormsgpack" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0f/07/2b1c042fa87d40cf2db5ca27dc4e8dd86f9a0436a10aa4361a8982718ae7/langgraph_checkpoint-3.0.1.tar.gz", hash = "sha256:59222f875f85186a22c494aedc65c4e985a3df27e696e5016ba0b98a5ed2cee0", size = 137785, upload-time = "2025-11-04T21:55:47.774Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/29/83/6404f6ed23a91d7bc63d7df902d144548434237d017820ceaa8d014035f2/langgraph_checkpoint-2.1.2.tar.gz", hash = "sha256:112e9d067a6eff8937caf198421b1ffba8d9207193f14ac6f89930c1260c06f9", size = 142420, upload-time = "2025-10-07T17:45:17.129Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/48/e3/616e3a7ff737d98c1bbb5700dd62278914e2a9ded09a79a1fa93cf24ce12/langgraph_checkpoint-3.0.1-py3-none-any.whl", hash = "sha256:9b04a8d0edc0474ce4eaf30c5d731cee38f11ddff50a6177eead95b5c4e4220b", size = 46249, upload-time = "2025-11-04T21:55:46.472Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/f2/06bf5addf8ee664291e1b9ffa1f28fc9d97e59806dc7de5aea9844cbf335/langgraph_checkpoint-2.1.2-py3-none-any.whl", hash = "sha256:911ebffb069fd01775d4b5184c04aaafc2962fcdf50cf49d524cd4367c4d0c60", size = 45763, upload-time = "2025-10-07T17:45:16.19Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langgraph-checkpoint-sqlite"
|
||||
version = "2.0.11"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "aiosqlite" },
|
||||
{ name = "langgraph-checkpoint" },
|
||||
{ name = "sqlite-vec" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d2/aa/5f9e9de74a6d0a9b77c703db0068d0f0cdc8dbc2e9b292ae95f4de115a44/langgraph_checkpoint_sqlite-2.0.11.tar.gz", hash = "sha256:e9337204c27b01a29edff65c1ecb7da0ca8ac7f1bd66b405617459043ac6c3ed", size = 109749, upload-time = "2025-07-25T17:32:07.773Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/3d/d4/c56f6b0e8c8211791c9954bef0edaef3dc2e118cf33800be44c7b90432bd/langgraph_checkpoint_sqlite-2.0.11-py3-none-any.whl", hash = "sha256:11c40d93225ce99fa2800332c97b16280addf9f15274def32c4d547955290d3f", size = 31191, upload-time = "2025-07-25T17:32:06.355Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1354,6 +1455,18 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/80/1a/0c84f7096d41d64425d29db549c8d6fe075f925a5f2022e8087d01d862c2/langsmith-0.4.47-py3-none-any.whl", hash = "sha256:b9e514611d4e1570e33595d33ccb1fe6eda9f96c5f961095a138651f746c1ef5", size = 411207, upload-time = "2025-11-24T16:01:59.123Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "linkify-it-py"
|
||||
version = "2.0.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "uc-micro-py" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2a/ae/bb56c6828e4797ba5a4821eec7c43b8bf40f69cda4d4f5f8c8a2810ec96a/linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048", size = 27946, upload-time = "2024-02-04T14:48:04.179Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/04/1e/b832de447dee8b582cac175871d2f6c3d5077cc56d5575cadba1fd1cccfa/linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79", size = 19820, upload-time = "2024-02-04T14:48:02.496Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "litellm"
|
||||
version = "1.79.3"
|
||||
@@ -1389,6 +1502,11 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
linkify = [
|
||||
{ name = "linkify-it-py" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markdownify"
|
||||
version = "1.2.2"
|
||||
@@ -1474,6 +1592,18 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/df/081ea8c41696d598e7cea4f101e49da718a9b6c9dcaaad4e76dfc11a022c/marshmallow-4.1.0-py3-none-any.whl", hash = "sha256:9901660499be3b880dc92d6b5ee0b9a79e94265b7793f71021f92040c07129f1", size = 48286, upload-time = "2025-11-01T15:40:35.542Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mdit-py-plugins"
|
||||
version = "0.5.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "markdown-it-py" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = "2025-08-11T07:25:49.083Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mdurl"
|
||||
version = "0.1.2"
|
||||
@@ -1907,6 +2037,84 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pillow"
|
||||
version = "12.1.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d0/02/d52c733a2452ef1ffcc123b68e6606d07276b0e358db70eabad7e40042b7/pillow-12.1.0.tar.gz", hash = "sha256:5c5ae0a06e9ea030ab786b0251b32c7e4ce10e58d983c0d5c56029455180b5b9", size = 46977283, upload-time = "2026-01-02T09:13:29.892Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/20/31/dc53fe21a2f2996e1b7d92bf671cdb157079385183ef7c1ae08b485db510/pillow-12.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a332ac4ccb84b6dde65dbace8431f3af08874bf9770719d32a635c4ef411b18b", size = 5262642, upload-time = "2026-01-02T09:11:10.138Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/c1/10e45ac9cc79419cedf5121b42dcca5a50ad2b601fa080f58c22fb27626e/pillow-12.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:907bfa8a9cb790748a9aa4513e37c88c59660da3bcfffbd24a7d9e6abf224551", size = 4657464, upload-time = "2026-01-02T09:11:12.319Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ad/26/7b82c0ab7ef40ebede7a97c72d473bda5950f609f8e0c77b04af574a0ddb/pillow-12.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efdc140e7b63b8f739d09a99033aa430accce485ff78e6d311973a67b6bf3208", size = 6234878, upload-time = "2026-01-02T09:11:14.096Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/76/25/27abc9792615b5e886ca9411ba6637b675f1b77af3104710ac7353fe5605/pillow-12.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bef9768cab184e7ae6e559c032e95ba8d07b3023c289f79a2bd36e8bf85605a5", size = 8044868, upload-time = "2026-01-02T09:11:15.903Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0a/ea/f200a4c36d836100e7bc738fc48cd963d3ba6372ebc8298a889e0cfc3359/pillow-12.1.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:742aea052cf5ab5034a53c3846165bc3ce88d7c38e954120db0ab867ca242661", size = 6349468, upload-time = "2026-01-02T09:11:17.631Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/11/8f/48d0b77ab2200374c66d344459b8958c86693be99526450e7aee714e03e4/pillow-12.1.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6dfc2af5b082b635af6e08e0d1f9f1c4e04d17d4e2ca0ef96131e85eda6eb17", size = 7041518, upload-time = "2026-01-02T09:11:19.389Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1d/23/c281182eb986b5d31f0a76d2a2c8cd41722d6fb8ed07521e802f9bba52de/pillow-12.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:609e89d9f90b581c8d16358c9087df76024cf058fa693dd3e1e1620823f39670", size = 6462829, upload-time = "2026-01-02T09:11:21.28Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/ef/7018273e0faac099d7b00982abdcc39142ae6f3bd9ceb06de09779c4a9d6/pillow-12.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:43b4899cfd091a9693a1278c4982f3e50f7fb7cff5153b05174b4afc9593b616", size = 7166756, upload-time = "2026-01-02T09:11:23.559Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/c8/993d4b7ab2e341fe02ceef9576afcf5830cdec640be2ac5bee1820d693d4/pillow-12.1.0-cp312-cp312-win32.whl", hash = "sha256:aa0c9cc0b82b14766a99fbe6084409972266e82f459821cd26997a488a7261a7", size = 6328770, upload-time = "2026-01-02T09:11:25.661Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/87/90b358775a3f02765d87655237229ba64a997b87efa8ccaca7dd3e36e7a7/pillow-12.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:d70534cea9e7966169ad29a903b99fc507e932069a881d0965a1a84bb57f6c6d", size = 7033406, upload-time = "2026-01-02T09:11:27.474Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/cf/881b457eccacac9e5b2ddd97d5071fb6d668307c57cbf4e3b5278e06e536/pillow-12.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:65b80c1ee7e14a87d6a068dd3b0aea268ffcabfe0498d38661b00c5b4b22e74c", size = 2452612, upload-time = "2026-01-02T09:11:29.309Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/c7/2530a4aa28248623e9d7f27316b42e27c32ec410f695929696f2e0e4a778/pillow-12.1.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:7b5dd7cbae20285cdb597b10eb5a2c13aa9de6cde9bb64a3c1317427b1db1ae1", size = 4062543, upload-time = "2026-01-02T09:11:31.566Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/1f/40b8eae823dc1519b87d53c30ed9ef085506b05281d313031755c1705f73/pillow-12.1.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:29a4cef9cb672363926f0470afc516dbf7305a14d8c54f7abbb5c199cd8f8179", size = 4138373, upload-time = "2026-01-02T09:11:33.367Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d4/77/6fa60634cf06e52139fd0e89e5bbf055e8166c691c42fb162818b7fda31d/pillow-12.1.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:681088909d7e8fa9e31b9799aaa59ba5234c58e5e4f1951b4c4d1082a2e980e0", size = 3601241, upload-time = "2026-01-02T09:11:35.011Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4f/bf/28ab865de622e14b747f0cd7877510848252d950e43002e224fb1c9ababf/pillow-12.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:983976c2ab753166dc66d36af6e8ec15bb511e4a25856e2227e5f7e00a160587", size = 5262410, upload-time = "2026-01-02T09:11:36.682Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/34/583420a1b55e715937a85bd48c5c0991598247a1fd2eb5423188e765ea02/pillow-12.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:db44d5c160a90df2d24a24760bbd37607d53da0b34fb546c4c232af7192298ac", size = 4657312, upload-time = "2026-01-02T09:11:38.535Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1d/fd/f5a0896839762885b3376ff04878f86ab2b097c2f9a9cdccf4eda8ba8dc0/pillow-12.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6b7a9d1db5dad90e2991645874f708e87d9a3c370c243c2d7684d28f7e133e6b", size = 6232605, upload-time = "2026-01-02T09:11:40.602Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/aa/938a09d127ac1e70e6ed467bd03834350b33ef646b31edb7452d5de43792/pillow-12.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6258f3260986990ba2fa8a874f8b6e808cf5abb51a94015ca3dc3c68aa4f30ea", size = 8041617, upload-time = "2026-01-02T09:11:42.721Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/e8/538b24cb426ac0186e03f80f78bc8dc7246c667f58b540bdd57c71c9f79d/pillow-12.1.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e115c15e3bc727b1ca3e641a909f77f8ca72a64fff150f666fcc85e57701c26c", size = 6346509, upload-time = "2026-01-02T09:11:44.955Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/9a/632e58ec89a32738cabfd9ec418f0e9898a2b4719afc581f07c04a05e3c9/pillow-12.1.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6741e6f3074a35e47c77b23a4e4f2d90db3ed905cb1c5e6e0d49bff2045632bc", size = 7038117, upload-time = "2026-01-02T09:11:46.736Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c7/a2/d40308cf86eada842ca1f3ffa45d0ca0df7e4ab33c83f81e73f5eaed136d/pillow-12.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:935b9d1aed48fcfb3f838caac506f38e29621b44ccc4f8a64d575cb1b2a88644", size = 6460151, upload-time = "2026-01-02T09:11:48.625Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f1/88/f5b058ad6453a085c5266660a1417bdad590199da1b32fb4efcff9d33b05/pillow-12.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5fee4c04aad8932da9f8f710af2c1a15a83582cfb884152a9caa79d4efcdbf9c", size = 7164534, upload-time = "2026-01-02T09:11:50.445Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/ce/c17334caea1db789163b5d855a5735e47995b0b5dc8745e9a3605d5f24c0/pillow-12.1.0-cp313-cp313-win32.whl", hash = "sha256:a786bf667724d84aa29b5db1c61b7bfdde380202aaca12c3461afd6b71743171", size = 6332551, upload-time = "2026-01-02T09:11:52.234Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/07/74a9d941fa45c90a0d9465098fe1ec85de3e2afbdc15cc4766622d516056/pillow-12.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:461f9dfdafa394c59cd6d818bdfdbab4028b83b02caadaff0ffd433faf4c9a7a", size = 7040087, upload-time = "2026-01-02T09:11:54.822Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/88/09/c99950c075a0e9053d8e880595926302575bc742b1b47fe1bbcc8d388d50/pillow-12.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:9212d6b86917a2300669511ed094a9406888362e085f2431a7da985a6b124f45", size = 2452470, upload-time = "2026-01-02T09:11:56.522Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b5/ba/970b7d85ba01f348dee4d65412476321d40ee04dcb51cd3735b9dc94eb58/pillow-12.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:00162e9ca6d22b7c3ee8e61faa3c3253cd19b6a37f126cad04f2f88b306f557d", size = 5264816, upload-time = "2026-01-02T09:11:58.227Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/10/60/650f2fb55fdba7a510d836202aa52f0baac633e50ab1cf18415d332188fb/pillow-12.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7d6daa89a00b58c37cb1747ec9fb7ac3bc5ffd5949f5888657dfddde6d1312e0", size = 4660472, upload-time = "2026-01-02T09:12:00.798Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/c0/5273a99478956a099d533c4f46cbaa19fd69d606624f4334b85e50987a08/pillow-12.1.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e2479c7f02f9d505682dc47df8c0ea1fc5e264c4d1629a5d63fe3e2334b89554", size = 6268974, upload-time = "2026-01-02T09:12:02.572Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/26/0bf714bc2e73d5267887d47931d53c4ceeceea6978148ed2ab2a4e6463c4/pillow-12.1.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f188d580bd870cda1e15183790d1cc2fa78f666e76077d103edf048eed9c356e", size = 8073070, upload-time = "2026-01-02T09:12:04.75Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/43/cf/1ea826200de111a9d65724c54f927f3111dc5ae297f294b370a670c17786/pillow-12.1.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0fde7ec5538ab5095cc02df38ee99b0443ff0e1c847a045554cf5f9af1f4aa82", size = 6380176, upload-time = "2026-01-02T09:12:06.626Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/e0/7938dd2b2013373fd85d96e0f38d62b7a5a262af21ac274250c7ca7847c9/pillow-12.1.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ed07dca4a8464bada6139ab38f5382f83e5f111698caf3191cb8dbf27d908b4", size = 7067061, upload-time = "2026-01-02T09:12:08.624Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/ad/a2aa97d37272a929a98437a8c0ac37b3cf012f4f8721e1bd5154699b2518/pillow-12.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f45bd71d1fa5e5749587613037b172e0b3b23159d1c00ef2fc920da6f470e6f0", size = 6491824, upload-time = "2026-01-02T09:12:10.488Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a4/44/80e46611b288d51b115826f136fb3465653c28f491068a72d3da49b54cd4/pillow-12.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:277518bf4fe74aa91489e1b20577473b19ee70fb97c374aa50830b279f25841b", size = 7190911, upload-time = "2026-01-02T09:12:12.772Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/77/eacc62356b4cf81abe99ff9dbc7402750044aed02cfd6a503f7c6fc11f3e/pillow-12.1.0-cp313-cp313t-win32.whl", hash = "sha256:7315f9137087c4e0ee73a761b163fc9aa3b19f5f606a7fc08d83fd3e4379af65", size = 6336445, upload-time = "2026-01-02T09:12:14.775Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/3c/57d81d0b74d218706dafccb87a87ea44262c43eef98eb3b164fd000e0491/pillow-12.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:0ddedfaa8b5f0b4ffbc2fa87b556dc59f6bb4ecb14a53b33f9189713ae8053c0", size = 7045354, upload-time = "2026-01-02T09:12:16.599Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ac/82/8b9b97bba2e3576a340f93b044a3a3a09841170ab4c1eb0d5c93469fd32f/pillow-12.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:80941e6d573197a0c28f394753de529bb436b1ca990ed6e765cf42426abc39f8", size = 2454547, upload-time = "2026-01-02T09:12:18.704Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8c/87/bdf971d8bbcf80a348cc3bacfcb239f5882100fe80534b0ce67a784181d8/pillow-12.1.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:5cb7bc1966d031aec37ddb9dcf15c2da5b2e9f7cc3ca7c54473a20a927e1eb91", size = 4062533, upload-time = "2026-01-02T09:12:20.791Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/4f/5eb37a681c68d605eb7034c004875c81f86ec9ef51f5be4a63eadd58859a/pillow-12.1.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:97e9993d5ed946aba26baf9c1e8cf18adbab584b99f452ee72f7ee8acb882796", size = 4138546, upload-time = "2026-01-02T09:12:23.664Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/11/6d/19a95acb2edbace40dcd582d077b991646b7083c41b98da4ed7555b59733/pillow-12.1.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:414b9a78e14ffeb98128863314e62c3f24b8a86081066625700b7985b3f529bd", size = 3601163, upload-time = "2026-01-02T09:12:26.338Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/36/2b8138e51cb42e4cc39c3297713455548be855a50558c3ac2beebdc251dd/pillow-12.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e6bdb408f7c9dd2a5ff2b14a3b0bb6d4deb29fb9961e6eb3ae2031ae9a5cec13", size = 5266086, upload-time = "2026-01-02T09:12:28.782Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/4b/649056e4d22e1caa90816bf99cef0884aed607ed38075bd75f091a607a38/pillow-12.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3413c2ae377550f5487991d444428f1a8ae92784aac79caa8b1e3b89b175f77e", size = 4657344, upload-time = "2026-01-02T09:12:31.117Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/6b/c5742cea0f1ade0cd61485dc3d81f05261fc2276f537fbdc00802de56779/pillow-12.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e5dcbe95016e88437ecf33544ba5db21ef1b8dd6e1b434a2cb2a3d605299e643", size = 6232114, upload-time = "2026-01-02T09:12:32.936Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bf/8f/9f521268ce22d63991601aafd3d48d5ff7280a246a1ef62d626d67b44064/pillow-12.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d0a7735df32ccbcc98b98a1ac785cc4b19b580be1bdf0aeb5c03223220ea09d5", size = 8042708, upload-time = "2026-01-02T09:12:34.78Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/eb/257f38542893f021502a1bbe0c2e883c90b5cff26cc33b1584a841a06d30/pillow-12.1.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c27407a2d1b96774cbc4a7594129cc027339fd800cd081e44497722ea1179de", size = 6347762, upload-time = "2026-01-02T09:12:36.748Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/5a/8ba375025701c09b309e8d5163c5a4ce0102fa86bbf8800eb0d7ac87bc51/pillow-12.1.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15c794d74303828eaa957ff8070846d0efe8c630901a1c753fdc63850e19ecd9", size = 7039265, upload-time = "2026-01-02T09:12:39.082Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cf/dc/cf5e4cdb3db533f539e88a7bbf9f190c64ab8a08a9bc7a4ccf55067872e4/pillow-12.1.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c990547452ee2800d8506c4150280757f88532f3de2a58e3022e9b179107862a", size = 6462341, upload-time = "2026-01-02T09:12:40.946Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d0/47/0291a25ac9550677e22eda48510cfc4fa4b2ef0396448b7fbdc0a6946309/pillow-12.1.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b63e13dd27da389ed9475b3d28510f0f954bca0041e8e551b2a4eb1eab56a39a", size = 7165395, upload-time = "2026-01-02T09:12:42.706Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4f/4c/e005a59393ec4d9416be06e6b45820403bb946a778e39ecec62f5b2b991e/pillow-12.1.0-cp314-cp314-win32.whl", hash = "sha256:1a949604f73eb07a8adab38c4fe50791f9919344398bdc8ac6b307f755fc7030", size = 6431413, upload-time = "2026-01-02T09:12:44.944Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/af/f23697f587ac5f9095d67e31b81c95c0249cd461a9798a061ed6709b09b5/pillow-12.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:4f9f6a650743f0ddee5593ac9e954ba1bdbc5e150bc066586d4f26127853ab94", size = 7176779, upload-time = "2026-01-02T09:12:46.727Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b3/36/6a51abf8599232f3e9afbd16d52829376a68909fe14efe29084445db4b73/pillow-12.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:808b99604f7873c800c4840f55ff389936ef1948e4e87645eaf3fccbc8477ac4", size = 2543105, upload-time = "2026-01-02T09:12:49.243Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/82/54/2e1dd20c8749ff225080d6ba465a0cab4387f5db0d1c5fb1439e2d99923f/pillow-12.1.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:bc11908616c8a283cf7d664f77411a5ed2a02009b0097ff8abbba5e79128ccf2", size = 5268571, upload-time = "2026-01-02T09:12:51.11Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/61/571163a5ef86ec0cf30d265ac2a70ae6fc9e28413d1dc94fa37fae6bda89/pillow-12.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:896866d2d436563fa2a43a9d72f417874f16b5545955c54a64941e87c1376c61", size = 4660426, upload-time = "2026-01-02T09:12:52.865Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5e/e1/53ee5163f794aef1bf84243f755ee6897a92c708505350dd1923f4afec48/pillow-12.1.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8e178e3e99d3c0ea8fc64b88447f7cac8ccf058af422a6cedc690d0eadd98c51", size = 6269908, upload-time = "2026-01-02T09:12:54.884Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bc/0b/b4b4106ff0ee1afa1dc599fde6ab230417f800279745124f6c50bcffed8e/pillow-12.1.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:079af2fb0c599c2ec144ba2c02766d1b55498e373b3ac64687e43849fbbef5bc", size = 8074733, upload-time = "2026-01-02T09:12:56.802Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/9f/80b411cbac4a732439e629a26ad3ef11907a8c7fc5377b7602f04f6fe4e7/pillow-12.1.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdec5e43377761c5dbca620efb69a77f6855c5a379e32ac5b158f54c84212b14", size = 6381431, upload-time = "2026-01-02T09:12:58.823Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/b7/d65c45db463b66ecb6abc17c6ba6917a911202a07662247e1355ce1789e7/pillow-12.1.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:565c986f4b45c020f5421a4cea13ef294dde9509a8577f29b2fc5edc7587fff8", size = 7068529, upload-time = "2026-01-02T09:13:00.885Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/50/96/dfd4cd726b4a45ae6e3c669fc9e49deb2241312605d33aba50499e9d9bd1/pillow-12.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:43aca0a55ce1eefc0aefa6253661cb54571857b1a7b2964bd8a1e3ef4b729924", size = 6492981, upload-time = "2026-01-02T09:13:03.314Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4d/1c/b5dc52cf713ae46033359c5ca920444f18a6359ce1020dd3e9c553ea5bc6/pillow-12.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0deedf2ea233722476b3a81e8cdfbad786f7adbed5d848469fa59fe52396e4ef", size = 7191878, upload-time = "2026-01-02T09:13:05.276Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/26/c4188248bd5edaf543864fe4834aebe9c9cb4968b6f573ce014cc42d0720/pillow-12.1.0-cp314-cp314t-win32.whl", hash = "sha256:b17fbdbe01c196e7e159aacb889e091f28e61020a8abeac07b68079b6e626988", size = 6438703, upload-time = "2026-01-02T09:13:07.491Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b8/0e/69ed296de8ea05cb03ee139cee600f424ca166e632567b2d66727f08c7ed/pillow-12.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27b9baecb428899db6c0de572d6d305cfaf38ca1596b5c0542a5182e3e74e8c6", size = 7182927, upload-time = "2026-01-02T09:13:09.841Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/f5/68334c015eed9b5cff77814258717dec591ded209ab5b6fb70e2ae873d1d/pillow-12.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:f61333d817698bdcdd0f9d7793e365ac3d2a21c1f1eb02b32ad6aefb8d8ea831", size = 2545104, upload-time = "2026-01-02T09:13:12.068Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "platformdirs"
|
||||
version = "4.5.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pluggy"
|
||||
version = "1.6.0"
|
||||
@@ -2070,6 +2278,27 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/7b/03/f335d6c52b4a4761bcc83499789a1e2e16d9d201a58c327a9b5cc9a41bd9/pyarrow-22.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0c34fe18094686194f204a3b1787a27456897d8a2d62caf84b61e8dfbc0252ae", size = 29185594, upload-time = "2025-10-24T10:09:53.111Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pyasn1"
|
||||
version = "0.6.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pyasn1-modules"
|
||||
version = "0.4.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pyasn1" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pydantic"
|
||||
version = "2.12.4"
|
||||
@@ -2509,6 +2738,18 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d7/69/64d43b21a10d72b45939a28961216baeb721cc2a430f5f7c3bfa21659a53/rpds_py-0.28.0-cp314-cp314t-win_amd64.whl", hash = "sha256:7a4e59c90d9c27c561eb3160323634a9ff50b04e4f7820600a2beb0ac90db578", size = 216233, upload-time = "2025-10-22T22:24:05.471Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rsa"
|
||||
version = "4.9.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pyasn1" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.14.4"
|
||||
@@ -2611,6 +2852,18 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sqlite-vec"
|
||||
version = "0.1.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/88/ed/aabc328f29ee6814033d008ec43e44f2c595447d9cccd5f2aabe60df2933/sqlite_vec-0.1.6-py3-none-macosx_10_6_x86_64.whl", hash = "sha256:77491bcaa6d496f2acb5cc0d0ff0b8964434f141523c121e313f9a7d8088dee3", size = 164075, upload-time = "2024-11-20T16:40:29.847Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/57/05604e509a129b22e303758bfa062c19afb020557d5e19b008c64016704e/sqlite_vec-0.1.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fdca35f7ee3243668a055255d4dee4dea7eed5a06da8cad409f89facf4595361", size = 165242, upload-time = "2024-11-20T16:40:31.206Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/48/dbb2cc4e5bad88c89c7bb296e2d0a8df58aab9edc75853728c361eefc24f/sqlite_vec-0.1.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b0519d9cd96164cd2e08e8eed225197f9cd2f0be82cb04567692a0a4be02da3", size = 103704, upload-time = "2024-11-20T16:40:33.729Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/80/76/97f33b1a2446f6ae55e59b33869bed4eafaf59b7f4c662c8d9491b6a714a/sqlite_vec-0.1.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux1_x86_64.whl", hash = "sha256:823b0493add80d7fe82ab0fe25df7c0703f4752941aee1c7b2b02cec9656cb24", size = 151556, upload-time = "2024-11-20T16:40:35.387Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6a/98/e8bc58b178266eae2fcf4c9c7a8303a8d41164d781b32d71097924a6bebe/sqlite_vec-0.1.6-py3-none-win_amd64.whl", hash = "sha256:c65bcfd90fa2f41f9000052bcb8bb75d38240b2dae49225389eca6c3136d3f0c", size = 281540, upload-time = "2024-11-20T16:40:37.296Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "synchronicity"
|
||||
version = "0.10.3"
|
||||
@@ -2646,6 +2899,36 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textual"
|
||||
version = "7.0.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "markdown-it-py", extra = ["linkify"] },
|
||||
{ name = "mdit-py-plugins" },
|
||||
{ name = "platformdirs" },
|
||||
{ name = "pygments" },
|
||||
{ name = "rich" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/db/ab/d94bc20d701061f21c7d0669db8d5609d835298369bd71aff99b388bfeac/textual-7.0.1.tar.gz", hash = "sha256:d61db446d22913c0fa6ca2a110c895732b40408e12b0eab1022b5d766924e1ed", size = 1582186, upload-time = "2026-01-07T13:07:23.682Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/84/38/47fab2a5fad163ca4851f7a20eb2442491cc63bf2756ec4ef161bc1461dd/textual-7.0.1-py3-none-any.whl", hash = "sha256:f9b7d16fa9b640bfff2a2008bf31e3f2d4429dc85e07a9583be033840ed15174", size = 715268, upload-time = "2026-01-07T13:07:22.006Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textual-autocomplete"
|
||||
version = "4.0.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "textual" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1e/3a/80411bc7b94969eb116ad1b18db90f8dce8a1de441278c4a81fee55a27ca/textual_autocomplete-4.0.6.tar.gz", hash = "sha256:2ba2f0d767be4480ecacb3e4b130cf07340e033c3500fc424fed9125d27a4586", size = 97967, upload-time = "2025-09-24T21:19:20.213Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9f/66/ebe744d79c87f25a42d2654dddbd09462edd595f2ded715245a51a546461/textual_autocomplete-4.0.6-py3-none-any.whl", hash = "sha256:bff69c19386e2cbb4a007503b058dc37671d480a4fa2ddb3959c15ceb4aff9b5", size = 16499, upload-time = "2025-09-24T21:19:18.489Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tiktoken"
|
||||
version = "0.12.0"
|
||||
@@ -2815,6 +3098,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uc-micro-py"
|
||||
version = "1.0.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/91/7a/146a99696aee0609e3712f2b44c6274566bc368dfe8375191278045186b8/uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a", size = 6043, upload-time = "2024-02-09T16:52:01.654Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229, upload-time = "2024-02-09T16:52:00.371Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "urllib3"
|
||||
version = "2.5.0"
|
||||
@@ -2826,22 +3118,24 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "uuid-utils"
|
||||
version = "0.11.1"
|
||||
version = "0.13.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e2/ef/b6c1fd4fee3b2854bf9d602530ab8b6624882e2691c15a9c4d22ea8c03eb/uuid_utils-0.11.1.tar.gz", hash = "sha256:7ef455547c2ccb712840b106b5ab006383a9bfe4125ba1c5ab92e47bcbf79b46", size = 19933, upload-time = "2025-10-02T13:32:09.526Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/fe/8a/17b11768dcb473d3a255c02ffdd94fbd1b345c906efea0a39124dcbaed52/uuid_utils-0.13.0.tar.gz", hash = "sha256:4c17df6427a9e23a4cd7fb9ee1efb53b8abb078660b9bdb2524ca8595022dfe1", size = 21921, upload-time = "2026-01-08T15:48:10.841Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/40/f5/254d7ce4b3aa4a1a3a4f279e0cc74eec8b4d3a61641d8ffc6e983907f2ca/uuid_utils-0.11.1-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:4bc8cf73c375b9ea11baf70caacc2c4bf7ce9bfd804623aa0541e5656f3dbeaf", size = 581019, upload-time = "2025-10-02T13:31:32.239Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/68/e6/f7d14c4e1988d8beb3ac9bd773f370376c704925bdfb07380f5476bb2986/uuid_utils-0.11.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:0d2cb3bcc6f5862d08a0ee868b18233bc63ba9ea0e85ea9f3f8e703983558eba", size = 294377, upload-time = "2025-10-02T13:31:34.01Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8e/40/847a9a0258e7a2a14b015afdaa06ee4754a2680db7b74bac159d594eeb18/uuid_utils-0.11.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:463400604f623969f198aba9133ebfd717636f5e34257340302b1c3ff685dc0f", size = 328070, upload-time = "2025-10-02T13:31:35.619Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/44/0c/c5d342d31860c9b4f481ef31a4056825961f9b462d216555e76dcee580ea/uuid_utils-0.11.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aef66b935342b268c6ffc1796267a1d9e73135740a10fe7e4098e1891cbcc476", size = 333610, upload-time = "2025-10-02T13:31:37.058Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e1/4b/52edc023ffcb9ab9a4042a58974a79c39ba7a565e683f1fd9814b504cf13/uuid_utils-0.11.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd65c41b81b762278997de0d027161f27f9cc4058fa57bbc0a1aaa63a63d6d1a", size = 475669, upload-time = "2025-10-02T13:31:38.38Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/81/ee55ee63264531bb1c97b5b6033ad6ec81b5cd77f89174e9aef3af3d8889/uuid_utils-0.11.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccfac9d5d7522d61accabb8c68448ead6407933415e67e62123ed6ed11f86510", size = 331946, upload-time = "2025-10-02T13:31:39.66Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cf/07/5d4be27af0e9648afa512f0d11bb6d96cb841dd6d29b57baa3fbf55fd62e/uuid_utils-0.11.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:003f48f05c01692d0c1f7e413d194e7299a1a364e0047a4eb904d3478b84eca1", size = 352920, upload-time = "2025-10-02T13:31:40.94Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5b/48/a69dddd9727512b0583b87bfff97d82a8813b28fb534a183c9e37033cfef/uuid_utils-0.11.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a5c936042120bdc30d62f539165beaa4a6ba7e817a89e5409a6f06dc62c677a9", size = 509413, upload-time = "2025-10-02T13:31:42.547Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/66/0d/1b529a3870c2354dd838d5f133a1cba75220242b0061f04a904ca245a131/uuid_utils-0.11.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:2e16dcdbdf4cd34ffb31ead6236960adb50e6c962c9f4554a6ecfdfa044c6259", size = 529454, upload-time = "2025-10-02T13:31:44.338Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/f2/04a3f77c85585aac09d546edaf871a4012052fb8ace6dbddd153b4d50f02/uuid_utils-0.11.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f8b21fed11b23134502153d652c77c3a37fa841a9aa15a4e6186d440a22f1a0e", size = 498084, upload-time = "2025-10-02T13:31:45.601Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/08/538b380b4c4b220f3222c970930fe459cc37f1dfc6c8dc912568d027f17d/uuid_utils-0.11.1-cp39-abi3-win32.whl", hash = "sha256:72abab5ab27c1b914e3f3f40f910532ae242df1b5f0ae43f1df2ef2f610b2a8c", size = 174314, upload-time = "2025-10-02T13:31:47.269Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/66/971ec830094ac1c7d46381678f7138c1805015399805e7dd7769c893c9c8/uuid_utils-0.11.1-cp39-abi3-win_amd64.whl", hash = "sha256:5ed9962f8993ef2fd418205f92830c29344102f86871d99b57cef053abf227d9", size = 179214, upload-time = "2025-10-02T13:31:48.344Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/85/b8/d40848ca22781f206c60a1885fc737d2640392bd6b5792d455525accd89c/uuid_utils-0.13.0-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:83628283e977fb212e756bc055df8fdd2f9f589a2e539ba1abe755b8ce8df7a4", size = 602130, upload-time = "2026-01-08T15:47:34.877Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/40/b9/00a944b8096632ea12638181f8e294abcde3e3b8b5e29b777f809896f6ae/uuid_utils-0.13.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c47638ed6334ab19d80f73664f153b04bbb04ab8ce4298d10da6a292d4d21c47", size = 304213, upload-time = "2026-01-08T15:47:36.807Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/da/d7/07b36c33aef683b81c9afff3ec178d5eb39d325447a68c3c68a62e4abb32/uuid_utils-0.13.0-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:b276b538c57733ed406948584912da422a604313c71479654848b84b9e19c9b0", size = 340624, upload-time = "2026-01-08T15:47:38.821Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7d/55/fcff2fff02a27866cb1a6614c9df2b3ace721f0a0aab2b7b8f5a7d4e4221/uuid_utils-0.13.0-cp39-abi3-manylinux_2_24_armv7l.whl", hash = "sha256:bdaf2b77e34b199cf04cde28399495fd1ed951de214a4ece1f3919b2f945bb06", size = 346705, upload-time = "2026-01-08T15:47:40.397Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/41/48/67438506c2bb8bee1b4b00d7c0b3ff866401b4790849bf591d654d4ea0bc/uuid_utils-0.13.0-cp39-abi3-manylinux_2_24_i686.whl", hash = "sha256:eb2f0baf81e82f9769a7684022dca8f3bf801ca1574a3e94df1876e9d6f9271e", size = 366023, upload-time = "2026-01-08T15:47:42.662Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/d7/2d91ce17f62fd764d593430de296b70843cc25229c772453f7261de9e6a8/uuid_utils-0.13.0-cp39-abi3-manylinux_2_24_ppc64le.whl", hash = "sha256:6be6c4d11275f5cc402a4fdba6c2b1ce45fd3d99bb78716cd1cc2cbf6802b2ce", size = 471149, upload-time = "2026-01-08T15:47:44.963Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2e/9a/aa0756186073ba84daf5704c150d41ede10eb3185d510e02532e2071550e/uuid_utils-0.13.0-cp39-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:77621cf6ceca7f42173a642a01c01c216f9eaec3b7b65d093d2d6a433ca0a83d", size = 342130, upload-time = "2026-01-08T15:47:46.331Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/74/b4/3191789f4dc3bed59d79cec90559821756297a25d7dc34d1bf7781577a75/uuid_utils-0.13.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9a5a9eb06c2bb86dd876cd7b2fe927fc8543d14c90d971581db6ffda4a02526f", size = 524128, upload-time = "2026-01-08T15:47:47.628Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/30/29839210a8fff9fc219bfa7c8d8cd115324e92618cba0cda090d54d3d321/uuid_utils-0.13.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:775347c6110fb71360df17aac74132d8d47c1dbe71233ac98197fc872a791fd2", size = 615872, upload-time = "2026-01-08T15:47:50.61Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/99/ed/15000c96a8bd8f5fd8efd622109bf52549ea0b366f8ce71c45580fa55878/uuid_utils-0.13.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:cf95f6370ad1a0910ee7b5ad5228fd19c4ae32fe3627389006adaf519408c41e", size = 581023, upload-time = "2026-01-08T15:47:52.776Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/c8/3f809fa2dc2ca4bd331c792a3c7d3e45ae2b709d85847a12b8b27d1d5f19/uuid_utils-0.13.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5a88e23e0b2f4203fefe2ccbca5736ee06fcad10e61b5e7e39c8d7904bc13300", size = 546715, upload-time = "2026-01-08T15:47:54.415Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/80/4f7c7efd734d1494397c781bd3d421688e9c187ae836e3174625b1ddf8b0/uuid_utils-0.13.0-cp39-abi3-win32.whl", hash = "sha256:3e4f2cc54e6a99c0551158100ead528479ad2596847478cbad624977064ffce3", size = 177650, upload-time = "2026-01-08T15:47:55.679Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/94/d05ab68622e66ad787a241dfe5ccc649b3af09f30eae977b9ee8f7046aaa/uuid_utils-0.13.0-cp39-abi3-win_amd64.whl", hash = "sha256:046cb2756e1597b3de22d24851b769913e192135830486a0a70bf41327f0360c", size = 183211, upload-time = "2026-01-08T15:47:57.604Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/69/37/674b3ce25cd715b831ea8ebbd828b74c40159f04c95d1bb963b2c876fe79/uuid_utils-0.13.0-cp39-abi3-win_arm64.whl", hash = "sha256:5447a680df6ef8a5a353976aaf4c97cc3a3a22b1ee13671c44227b921e3ae2a9", size = 183518, upload-time = "2026-01-08T15:47:59.148Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -1,184 +0,0 @@
|
||||
# Rig DeepAgents Parity Plan (LangChain DeepAgents vs rig-deepagents)
|
||||
|
||||
## Differences Summary (Current Gaps)
|
||||
|
||||
1. **No high-level `create_deep_agent` factory in Rust**
|
||||
- Python wires default middleware, base prompt, and `recursion_limit=1000`.
|
||||
|
||||
2. **Filesystem + planning state updates missing**
|
||||
- Python tools (`write_file`, `edit_file`, `write_todos`, `task`) can return `Command` with state updates.
|
||||
- Rust tools return `String` only; `AgentState.todos` / `AgentState.files` are not updated.
|
||||
|
||||
3. **No `execute` tool + sandbox backend**
|
||||
- Python exposes `execute` when backend supports `SandboxBackendProtocol`.
|
||||
- Rust `Backend` has no `execute` equivalent.
|
||||
|
||||
4. **Subagent parity gaps**
|
||||
- Default middleware is not applied in `SubAgentMiddleware::new`.
|
||||
- `interrupt_on` and `default_interrupt_on` are not supported.
|
||||
- Subagent file updates are not merged back into parent state.
|
||||
|
||||
5. **Backend factory + Store backend missing**
|
||||
- Python supports `BackendFactory` and `StoreBackend` (LangGraph BaseStore).
|
||||
- Rust only has static backends (memory/filesystem/composite).
|
||||
|
||||
6. **Tool runtime metadata not propagated**
|
||||
- `ToolRuntime.tool_call_id` exists but is not set per tool call.
|
||||
|
||||
7. **Grep semantics differ**
|
||||
- Python uses regex; Rust uses literal search only.
|
||||
|
||||
8. **Structured output + cache hooks not wired**
|
||||
- Python supports `response_format`, `context_schema`, `cache` in `create_deep_agent`.
|
||||
|
||||
9. **Tool-result eviction / streaming / checkpointer integration missing**
|
||||
- Python evicts large tool outputs to filesystem and supports `checkpointer` + stream channels.
|
||||
|
||||
10. **Parallel tool-call execution semantics differ**
|
||||
- Python can execute multiple tool_calls in a single step; Rust executor is sequential.
|
||||
|
||||
11. **Pregel execution integration mismatch**
|
||||
- Python DeepAgents runs on LangGraph (Pregel-style) with Command state updates;
|
||||
Rust Pregel exists but DeepAgents logic uses `AgentExecutor` and does not share middleware/state reducers.
|
||||
|
||||
## Scope Decision (Required vs Optional)
|
||||
|
||||
### Required (Core Parity)
|
||||
- **P0** ToolResult + state update pipeline (Phase 1).
|
||||
- **P0** Filesystem + planning middleware parity (Phase 2).
|
||||
- **P1** Subagent parity (Phase 3).
|
||||
- **P1** High-level factory API (Phase 5).
|
||||
|
||||
### Optional (Nice-to-have / Riskier)
|
||||
- **P2** Sandbox execution + `execute` tool (Phase 4).
|
||||
- **P2** Pregel alignment bridge (Phase 7).
|
||||
- **P3** Backend factory + Store backend (Gap 5).
|
||||
- **P3** Regex grep or dual-mode grep (Gap 7).
|
||||
- **P3** Structured output + cache hooks (Gap 8).
|
||||
|
||||
## Pregel-First Implementation Principle (Now)
|
||||
|
||||
- Implement P0/P1 features so they can be shared by **both** the current `AgentExecutor`
|
||||
and the Pregel workflow vertices, even before Phase 7.
|
||||
- Favor **immutable state updates** (`StateUpdate` / `ToolResult` → reducer-style merge) over in-place mutation.
|
||||
- Keep tool execution **Command-like** (message + state updates) to preserve LangGraph parity.
|
||||
|
||||
## Implementation Plan (Rig-based)
|
||||
|
||||
### Phase 1 (P0, Required): Add ToolResult + State Update Pipeline
|
||||
|
||||
- **Goal:** Allow tools to return both messages and state updates.
|
||||
- **Changes:**
|
||||
- Add a `ToolResult` struct (e.g., `{ message: String, updates: Vec<StateUpdate> }`).
|
||||
- Update `Tool` trait to return `ToolResult` (or `Result<ToolResult, MiddlewareError>`).
|
||||
- Update `AgentExecutor::execute_tool_call` to:
|
||||
- set `ToolRuntime` with `tool_call_id`,
|
||||
- apply `StateUpdate`s to `AgentState`,
|
||||
- add `Message::tool` with the returned message.
|
||||
- Define `ToolResult` so it can be consumed by Pregel vertices later (e.g., a helper to convert updates into `WorkflowMessage` or workflow `StateUpdate`).
|
||||
- **Targets:**
|
||||
- `rust-research-agent/crates/rig-deepagents/src/middleware/traits.rs`
|
||||
- `rust-research-agent/crates/rig-deepagents/src/executor.rs`
|
||||
- `rust-research-agent/crates/rig-deepagents/src/runtime.rs`
|
||||
- **Tests:**
|
||||
- Tool result update flow updates `AgentState.todos` and `AgentState.files`.
|
||||
|
||||
### Phase 2 (P0, Required): Filesystem + Planning Middleware Parity
|
||||
|
||||
- **Goal:** Mirror Python `TodoListMiddleware` + `FilesystemMiddleware` behaviors.
|
||||
- **Changes:**
|
||||
- Add a `TodoListMiddleware` (or equivalent) that injects `write_todos` tool + system prompt guidance.
|
||||
- Update `WriteTodosTool` to return `StateUpdate::SetTodos` via `ToolResult`.
|
||||
- Add a `FilesystemMiddleware` that injects file tools and prompt guidance.
|
||||
- Update `WriteFileTool` / `EditFileTool` to return `StateUpdate::UpdateFiles` when backend returns `files_update`.
|
||||
- Ensure file updates support reducer-style merges (future Pregel alignment).
|
||||
- **Targets:**
|
||||
- `rust-research-agent/crates/rig-deepagents/src/middleware/`
|
||||
- `rust-research-agent/crates/rig-deepagents/src/tools/write_todos.rs`
|
||||
- `rust-research-agent/crates/rig-deepagents/src/tools/write_file.rs`
|
||||
- `rust-research-agent/crates/rig-deepagents/src/tools/edit_file.rs`
|
||||
- **Tests:**
|
||||
- `write_todos` updates state.
|
||||
- `write_file` / `edit_file` updates state with `MemoryBackend`.
|
||||
|
||||
### Phase 3 (P1, Required): Subagent Parity (Middleware + Updates)
|
||||
|
||||
- **Goal:** Align subagent behavior with LangChain DeepAgents.
|
||||
- **Changes:**
|
||||
- Apply `default_middleware` in `SubAgentMiddleware::new` (pass into `SubAgentExecutorConfig`).
|
||||
- Add `interrupt_on` to `SubAgentSpec` and plumb through execution.
|
||||
- Extend `SubAgentMiddlewareConfig` with `default_interrupt_on` and apply `HumanInTheLoopMiddleware` to subagents.
|
||||
- Update `TaskTool` to return state updates (merge subagent files into parent using `IsolatedState::merge_files`).
|
||||
- **Targets:**
|
||||
- `rust-research-agent/crates/rig-deepagents/src/middleware/subagent/*`
|
||||
- **Tests:**
|
||||
- Subagent file updates propagate to parent.
|
||||
- `interrupt_on` triggers `HumanInTheLoopMiddleware` for subagent tools.
|
||||
|
||||
### Phase 4 (P2, Optional): Sandbox Execution + Execute Tool
|
||||
|
||||
- **Goal:** Provide `execute` tool parity and safe shell execution.
|
||||
- **Changes:**
|
||||
- Add a new trait (e.g., `ExecutableBackend`) or extend `Backend` with `execute`.
|
||||
- Implement a `SandboxBackend` (local command runner with allowlist/root restriction).
|
||||
- Implement `ExecuteTool` and include it in default toolset when backend supports execution.
|
||||
- **Targets:**
|
||||
- `rust-research-agent/crates/rig-deepagents/src/backends/`
|
||||
- `rust-research-agent/crates/rig-deepagents/src/tools/execute.rs`
|
||||
- `rust-research-agent/crates/rig-deepagents/src/tools/mod.rs`
|
||||
- **Tests:**
|
||||
- `execute` tool only available when backend supports execution.
|
||||
|
||||
### Phase 5 (P1, Required): High-Level Factory API
|
||||
|
||||
- **Goal:** Provide `create_deep_agent` style factory using Rig.
|
||||
- **Changes:**
|
||||
- Add `create_deep_agent` or `DeepAgentBuilder` that wires:
|
||||
- default middleware stack (TodoList, Filesystem, SubAgent, Summarization, PatchToolCalls, optional HumanInTheLoop),
|
||||
- base system prompt (DeepAgents-style),
|
||||
- recursion limit (match 1000),
|
||||
- optional tools/subagents/interrupt_on.
|
||||
- Use `RigAgentAdapter` for LLM and `RigToolAdapter` for Rig tools.
|
||||
- **Targets:**
|
||||
- `rust-research-agent/crates/rig-deepagents/src/lib.rs`
|
||||
- `rust-research-agent/crates/rig-deepagents/src/config.rs` or new builder module.
|
||||
- **Tests:**
|
||||
- Factory creates an executor with expected default tools and prompts.
|
||||
|
||||
### Phase 6 (P3, Optional): Parity Extensions
|
||||
|
||||
- **Regex grep:** add `grep_regex` tool or a config flag to switch regex vs literal.
|
||||
- **Store backend:** implement a `StoreBackend` using a persistent KV store (e.g., sqlite) and allow backend factory hooks.
|
||||
- **Structured output:** map to Rig structured outputs if available, or add middleware to validate JSON responses.
|
||||
- **Prompt caching:** optional middleware if Rig provider exposes caching semantics.
|
||||
- **Tool-result eviction:** add large tool result interception and file-backed storage.
|
||||
- **Streaming/checkpointer:** add stream channels and checkpointer integration for agent execution.
|
||||
- **Parallel tool calls:** enable concurrent tool execution when multiple tool_calls are returned.
|
||||
|
||||
#### ToolResult-based large tool result eviction (design draft)
|
||||
|
||||
- **Goal:** prevent oversized `ToolResult.message` strings from bloating context by evicting to `/large_tool_results/{tool_call_id}`.
|
||||
- **Trigger:** if `result.message.len() > 4 * token_limit` (default 20k token ≈ 80k chars); skip for filesystem tools to avoid recursion.
|
||||
- **Flow:** after tool execution, if oversized → `backend.write(evict_path, result.message)` → replace message with a short notice + first 10 lines sample; attach `StateUpdate::UpdateFiles` when backend returns `files_update`.
|
||||
- **Integration point:** add a `ToolResultInterceptor` hook in `AgentExecutor` (or new middleware hook) that transforms `ToolResult` before `Message::tool` is added.
|
||||
- **Open questions:** path sanitization for `tool_call_id`, token estimation source, and whether to expose config on `FilesystemMiddleware` vs executor.
|
||||
|
||||
### Phase 7 (P2, Optional): Pregel Alignment
|
||||
|
||||
- **Goal:** Align DeepAgents execution with the Pregel workflow runtime.
|
||||
- **Changes:**
|
||||
- Add a `create_deep_agent_graph` or bridge that builds a `WorkflowGraph` with Agent/Tool/SubAgent vertices.
|
||||
- Port middleware behaviors (filesystem/todo/task updates) into Pregel vertices or a shared ToolResult pipeline.
|
||||
- Ensure state updates follow LangGraph-style Command semantics.
|
||||
- **Targets:**
|
||||
- `rust-research-agent/crates/rig-deepagents/src/workflow/*`
|
||||
- `rust-research-agent/crates/rig-deepagents/src/pregel/*`
|
||||
|
||||
## Suggested Sequencing
|
||||
|
||||
1. Phase 1 (P0) - unlocks state update pipeline (foundation).
|
||||
2. Phase 2 (P0) - filesystem + planning parity on top of state updates.
|
||||
3. Phase 3 (P1) - subagent parity with update propagation.
|
||||
4. Phase 5 (P1) - high-level factory API for ergonomics.
|
||||
5. Phase 4 (P2) - sandbox execute tool (optional).
|
||||
6. Phase 6 (P3) - optional parity extensions.
|
||||
26
task_plan.md
26
task_plan.md
@@ -1,26 +0,0 @@
|
||||
# Task Plan: Compare DeepAgents vs LangChain and plan Rig implementation
|
||||
|
||||
## Goal
|
||||
Identify differences between current `rust-research-agent/crates/rig-deepagents` and latest LangChain DeepAgents, then produce a concrete implementation plan using Rust Rig Framework.
|
||||
|
||||
## Phases
|
||||
- [x] Phase 1: Plan and setup
|
||||
- [x] Phase 2: Research/gather information
|
||||
- [x] Phase 3: Execute/build plan
|
||||
- [x] Phase 4: Review and deliver
|
||||
- [x] Phase 5: Full audit (pregel-based comparison)
|
||||
- [x] Phase 6: Implement P0 Phase 1 (ToolResult + state updates)
|
||||
|
||||
## Key Questions
|
||||
1. What is the latest LangChain DeepAgents feature set and API surface we should compare against?
|
||||
2. What gaps exist in `rust-research-agent/crates/rig-deepagents` relative to that?
|
||||
3. How can each gap be implemented using Rig Framework in Rust (design + steps)?
|
||||
|
||||
## Decisions Made
|
||||
- Use repository inspection for current Rust implementation and public source/docs for latest LangChain DeepAgents.
|
||||
|
||||
## Errors Encountered
|
||||
- `cargo test` failed due to ToolResult API changes in tests: updated assertions/imports and reran successfully.
|
||||
|
||||
## Status
|
||||
**Completed** - P0 Phase 1 implemented; full test suite passed.
|
||||
310
uv.lock
generated
310
uv.lock
generated
@@ -32,14 +32,14 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "anyio"
|
||||
version = "4.12.0"
|
||||
version = "4.12.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "idna" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/96/f0/5eb65b2bb0d09ac6776f2eb54adee6abe8228ea05b20a5ad0e4945de8aac/anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703", size = 228685, upload-time = "2026-01-06T11:45:21.246Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -53,15 +53,15 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "arxiv"
|
||||
version = "2.3.1"
|
||||
version = "2.4.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "feedparser" },
|
||||
{ name = "requests" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/dd/95/65e38ddfb54762a8f1777bbe80da2cebf7941376e67a2212de487d9372db/arxiv-2.3.1.tar.gz", hash = "sha256:08567185dfc102c8d349de4b9e84dfde0af46d6402486e3009afc90f8ccf9709", size = 16692, upload-time = "2025-11-13T06:22:59.853Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8d/aa/dc1c6c633f63fce090e7c067af8c528a5e61218a61c266ff615d46cbde0a/arxiv-2.4.0.tar.gz", hash = "sha256:cabe5470d031aa3f22d2744a7600391c62c3489653f0c62bec9019e62bb0554b", size = 74546, upload-time = "2026-01-05T02:43:16.823Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/90/7f/340847023184305a6378d75ec71e1dd38a942dfe71b7c29314b8fbe26948/arxiv-2.3.1-py3-none-any.whl", hash = "sha256:eb5a0b76808cc0a16de0c1448df0f927a3cf576096686d8e335a98b8872df1be", size = 11565, upload-time = "2025-11-13T06:22:58.662Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/11/63/9e71153b2d48c98f8079c90d7211bc65515cc1ad18c3328c3c0472e68f44/arxiv-2.4.0-py3-none-any.whl", hash = "sha256:c02ccb09a777aaadd75d3bc1d2627894ef9c987c651d0dacd864b9f69fb0569f", size = 12065, upload-time = "2026-01-05T02:43:12.542Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -107,22 +107,13 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9d/2a/9186535ce58db529927f6cf5990a849aa9e052eea3e2cfefe20b9e1802da/bracex-2.6-py3-none-any.whl", hash = "sha256:0b0049264e7340b3ec782b5cb99beb325f36c3782a32e36e876452fd49a09952", size = 11508, upload-time = "2025-06-22T19:12:29.781Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cachetools"
|
||||
version = "6.2.4"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "certifi"
|
||||
version = "2025.11.12"
|
||||
version = "2026.1.4"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e0/2d/a891ca51311197f6ad14a7ef42e2399f36cf2f9bd44752b3dc4eab60fdc5/certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120", size = 154268, upload-time = "2026-01-04T02:42:41.825Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -379,7 +370,7 @@ dev = [
|
||||
|
||||
[[package]]
|
||||
name = "deepagents"
|
||||
version = "0.3.1"
|
||||
version = "0.3.5"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain" },
|
||||
@@ -388,9 +379,9 @@ dependencies = [
|
||||
{ name = "langchain-google-genai" },
|
||||
{ name = "wcmatch" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/01/53/d8e663bf01af6a2c93373da5b488c7826ac84b742cc861ffd5d72467766d/deepagents-0.3.1.tar.gz", hash = "sha256:99d89cc5ce4729ac27fd2afdc93a132d8b863e5972ecede1c6d21cd921dae298", size = 51587, upload-time = "2025-12-23T18:49:14.793Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/03/30/648642f15f5e84778088e788159723ca97fe1e2a1ab2717ea7159da3f825/deepagents-0.3.5.tar.gz", hash = "sha256:9310e7b872244160555f1f2e914465b2b8682bcf5cb7c5d5f21f0b8e9fc127b2", size = 63104, upload-time = "2026-01-09T17:36:17.563Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/04/81b3c1f66194405da0856b8e72680ae3b2dc70f0c5ce44ee665279c24954/deepagents-0.3.1-py3-none-any.whl", hash = "sha256:f62c735715f8d18edbde6bcd4b54c5a8155f38a3625292ba43503ea947929985", size = 54263, upload-time = "2025-12-23T18:49:13.523Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/26/b1/670fd57c592de7c084438b52b319f7d38feab78fa6bea76f6a05fc6588ec/deepagents-0.3.5-py3-none-any.whl", hash = "sha256:9a2a08aa7523531a9f1a80d3f40f2f30d9970ea8657d4425efab1d5cf66addff", size = 68016, upload-time = "2026-01-09T17:36:16.351Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -449,16 +440,15 @@ sdist = { url = "https://files.pythonhosted.org/packages/e6/79/d4f20e91327c98096
|
||||
|
||||
[[package]]
|
||||
name = "google-auth"
|
||||
version = "2.45.0"
|
||||
version = "2.47.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "cachetools" },
|
||||
{ name = "pyasn1-modules" },
|
||||
{ name = "rsa" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e5/00/3c794502a8b892c404b2dea5b3650eb21bfc7069612fbfd15c7f17c1cb0d/google_auth-2.45.0.tar.gz", hash = "sha256:90d3f41b6b72ea72dd9811e765699ee491ab24139f34ebf1ca2b9cc0c38708f3", size = 320708, upload-time = "2025-12-15T22:58:42.889Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/60/3c/ec64b9a275ca22fa1cd3b6e77fefcf837b0732c890aa32d2bd21313d9b33/google_auth-2.47.0.tar.gz", hash = "sha256:833229070a9dfee1a353ae9877dcd2dec069a8281a4e72e72f77d4a70ff945da", size = 323719, upload-time = "2026-01-06T21:55:31.045Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c6/97/451d55e05487a5cd6279a01a7e34921858b16f7dc8aa38a2c684743cd2b3/google_auth-2.45.0-py2.py3-none-any.whl", hash = "sha256:82344e86dc00410ef5382d99be677c6043d72e502b625aa4f4afa0bdacca0f36", size = 233312, upload-time = "2025-12-15T22:58:40.777Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/db/18/79e9008530b79527e0d5f79e7eef08d3b179b7f851cfd3a2f27822fbdfa9/google_auth-2.47.0-py3-none-any.whl", hash = "sha256:c516d68336bfde7cf0da26aab674a36fedcf04b37ac4edd59c597178760c3498", size = 234867, upload-time = "2026-01-06T21:55:28.6Z" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
@@ -468,7 +458,7 @@ requests = [
|
||||
|
||||
[[package]]
|
||||
name = "google-genai"
|
||||
version = "1.56.0"
|
||||
version = "1.57.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
@@ -482,9 +472,9 @@ dependencies = [
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "websockets" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/70/ad/d3ac5a102135bd3f1e4b1475ca65d2bd4bcc22eb2e9348ac40fe3fadb1d6/google_genai-1.56.0.tar.gz", hash = "sha256:0491af33c375f099777ae207d9621f044e27091fafad4c50e617eba32165e82f", size = 340451, upload-time = "2025-12-17T12:35:05.412Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2b/b4/8251c2d2576224a4b51a8ab6159820f9200b8da28ff555c78ee15607096e/google_genai-1.57.0.tar.gz", hash = "sha256:0ff9c36b8d68abfbdbd13b703ece926de5f3e67955666b36315ecf669b94a826", size = 485648, upload-time = "2026-01-07T20:38:20.271Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/84/93/94bc7a89ef4e7ed3666add55cd859d1483a22737251df659bf1aa46e9405/google_genai-1.56.0-py3-none-any.whl", hash = "sha256:9e6b11e0c105ead229368cb5849a480e4d0185519f8d9f538d61ecfcf193b052", size = 426563, upload-time = "2025-12-17T12:35:03.717Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/02/858bdae08e2184b6afe0b18bc3113318522c9cf326a5a1698055edd31f88/google_genai-1.57.0-py3-none-any.whl", hash = "sha256:d63c7a89a1f549c4d14032f41a0cdb4b6fe3f565e2eee6b5e0907a0aeceabefd", size = 713323, upload-time = "2026-01-07T20:38:18.051Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -530,6 +520,19 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/19/41/0b430b01a2eb38ee887f88c1f07644a1df8e289353b78e82b37ef988fb64/grpcio-1.76.0-cp314-cp314-win_amd64.whl", hash = "sha256:922fa70ba549fce362d2e2871ab542082d66e2aaf0c19480ea453905b01f384e", size = 4834462, upload-time = "2025-10-21T16:22:39.772Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grpcio-health-checking"
|
||||
version = "1.76.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "grpcio" },
|
||||
{ name = "protobuf" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/3e/96/5a52dcf21078b47ffa0c2ed613c3153a06f138edb6133792bace5f1ccc1d/grpcio_health_checking-1.76.0.tar.gz", hash = "sha256:b7a99d74096b3ab3a59987fc02374068e1c180a352e8d1f79f10e5a23727098d", size = 16784, upload-time = "2025-10-21T16:28:55.204Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/65/e6/746dffa51399827e38bb3f3f1ad656a3d8c1255039b256a6f76593368768/grpcio_health_checking-1.76.0-py3-none-any.whl", hash = "sha256:9743f345a855ba030cc7c381361606870b79d33bb71d7756efa47b6faa970f81", size = 18910, upload-time = "2025-10-21T16:27:26.332Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "grpcio-tools"
|
||||
version = "1.75.1"
|
||||
@@ -647,7 +650,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "ipython"
|
||||
version = "9.8.0"
|
||||
version = "9.9.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||
@@ -661,9 +664,9 @@ dependencies = [
|
||||
{ name = "stack-data" },
|
||||
{ name = "traitlets" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/12/51/a703c030f4928646d390b4971af4938a1b10c9dfce694f0d99a0bb073cb2/ipython-9.8.0.tar.gz", hash = "sha256:8e4ce129a627eb9dd221c41b1d2cdaed4ef7c9da8c17c63f6f578fe231141f83", size = 4424940, upload-time = "2025-12-03T10:18:24.353Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/46/dd/fb08d22ec0c27e73c8bc8f71810709870d51cadaf27b7ddd3f011236c100/ipython-9.9.0.tar.gz", hash = "sha256:48fbed1b2de5e2c7177eefa144aba7fcb82dac514f09b57e2ac9da34ddb54220", size = 4425043, upload-time = "2026-01-05T12:36:46.233Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/f1/df/8ee1c5dd1e3308b5d5b2f2dfea323bb2f3827da8d654abb6642051199049/ipython-9.8.0-py3-none-any.whl", hash = "sha256:ebe6d1d58d7d988fbf23ff8ff6d8e1622cfdb194daf4b7b73b792c4ec3b85385", size = 621374, upload-time = "2025-12-03T10:18:22.335Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/92/162cfaee4ccf370465c5af1ce36a9eacec1becb552f2033bb3584e6f640a/ipython-9.9.0-py3-none-any.whl", hash = "sha256:b457fe9165df2b84e8ec909a97abcf2ed88f565970efba16b1f7229c283d252b", size = 621431, upload-time = "2026-01-05T12:36:44.669Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -795,7 +798,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "jupyter-client"
|
||||
version = "8.7.0"
|
||||
version = "8.8.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "jupyter-core" },
|
||||
@@ -804,9 +807,9 @@ dependencies = [
|
||||
{ name = "tornado" },
|
||||
{ name = "traitlets" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a6/27/d10de45e8ad4ce872372c4a3a37b7b35b6b064f6f023a5c14ffcced4d59d/jupyter_client-8.7.0.tar.gz", hash = "sha256:3357212d9cbe01209e59190f67a3a7e1f387a4f4e88d1e0433ad84d7b262531d", size = 344691, upload-time = "2025-12-09T18:37:01.953Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/05/e4/ba649102a3bc3fbca54e7239fb924fd434c766f855693d86de0b1f2bec81/jupyter_client-8.8.0.tar.gz", hash = "sha256:d556811419a4f2d96c869af34e854e3f059b7cc2d6d01a9cd9c85c267691be3e", size = 348020, upload-time = "2026-01-08T13:55:47.938Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/bb/f5/fddaec430367be9d62a7ed125530e133bfd4a1c0350fe221149ee0f2b526/jupyter_client-8.7.0-py3-none-any.whl", hash = "sha256:3671a94fd25e62f5f2f554f5e95389c2294d89822378a5f2dd24353e1494a9e0", size = 106215, upload-time = "2025-12-09T18:37:00.024Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2d/0b/ceb7694d864abc0a047649aec263878acb9f792e1fec3e676f22dc9015e3/jupyter_client-8.8.0-py3-none-any.whl", hash = "sha256:f93a5b99c5e23a507b773d3a1136bd6e16c67883ccdbd9a829b0bbdb98cd7d7a", size = 107371, upload-time = "2026-01-08T13:55:45.562Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -833,35 +836,35 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain"
|
||||
version = "1.2.0"
|
||||
version = "1.2.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "langgraph" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b1/12/3a74c22abdfddd877dfc2ee666d516f9132877fcd25eb4dd694835c59c79/langchain-1.2.0.tar.gz", hash = "sha256:a087d1e2b2969819e29a91a6d5f98302aafe31bd49ba377ecee3bf5a5dcfe14a", size = 536126, upload-time = "2025-12-15T14:51:42.24Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/5f/78/9565319259d92818d96f30d55507ee1072fbf5c008b95a6acecf5e47c4d6/langchain-1.2.3.tar.gz", hash = "sha256:9d6171f9c3c760ca3c7c2cf8518e6f8625380962c488b41e35ebff1f1d611077", size = 548296, upload-time = "2026-01-08T20:26:30.149Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/23/00/4e3fa0d90f5a5c376ccb8ca983d0f0f7287783dfac48702e18f01d24673b/langchain-1.2.0-py3-none-any.whl", hash = "sha256:82f0d17aa4fbb11560b30e1e7d4aeb75e3ad71ce09b85c90ab208b181a24ffac", size = 102828, upload-time = "2025-12-15T14:51:40.802Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/de/e5/9b4f58533f8ce3013b1a993289eb11e8607d9c9d9d14699b29c6ac3b4132/langchain-1.2.3-py3-none-any.whl", hash = "sha256:5cdc7c80f672962b030c4b0d16d0d8f26d849c0ada63a4b8653a20d7505512ae", size = 106428, upload-time = "2026-01-08T20:26:29.162Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-anthropic"
|
||||
version = "1.3.0"
|
||||
version = "1.3.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anthropic" },
|
||||
{ name = "langchain-core" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/de/50/cc3b3e0410d86de457d7a100dde763fc1c33c4ce884e883659aa4cf95538/langchain_anthropic-1.3.0.tar.gz", hash = "sha256:497a937ee0310c588196bff37f39f02d43d87bff3a12d16278bdbc3bd0e9a80b", size = 707207, upload-time = "2025-12-12T20:20:57.417Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0d/b6/ac5ee84e15bf79844c9c791f99a614c7ec7e1a63c2947e55977be01a81b4/langchain_anthropic-1.3.1.tar.gz", hash = "sha256:4f3d7a4a7729ab1aeaf62d32c87d4d227c1b5421668ca9e3734562b383470b07", size = 708940, upload-time = "2026-01-05T21:07:19.345Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/29/ca/0725bc347a9c226da9d76f85bf7d03115caec7dbc87876af68579c4ab24e/langchain_anthropic-1.3.0-py3-none-any.whl", hash = "sha256:3823560e1df15d6082636baa04f87cb59052ba70aada0eba381c4679b1ce0eba", size = 45724, upload-time = "2025-12-12T20:20:56.287Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/4f/7a5b32764addf4b757545b89899b9d76688176f19e4ee89868e3b8bbfd0f/langchain_anthropic-1.3.1-py3-none-any.whl", hash = "sha256:1fc28cf8037c30597ee6172fc2ff9e345efe8149a8c2a39897b1eebba2948322", size = 46328, upload-time = "2026-01-05T21:07:18.261Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-core"
|
||||
version = "1.2.5"
|
||||
version = "1.2.7"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "jsonpatch" },
|
||||
@@ -873,14 +876,14 @@ dependencies = [
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "uuid-utils" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c8/86/bd678d69341ae4178bc8dfa04024d63636e5d580ff03d4502c8bc2262917/langchain_core-1.2.5.tar.gz", hash = "sha256:d674f6df42f07e846859b9d3afe547cad333d6bf9763e92c88eb4f8aaedcd3cc", size = 820445, upload-time = "2025-12-22T23:45:32.041Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a2/0e/664d8d81b3493e09cbab72448d2f9d693d1fa5aa2bcc488602203a9b6da0/langchain_core-1.2.7.tar.gz", hash = "sha256:e1460639f96c352b4a41c375f25aeb8d16ffc1769499fb1c20503aad59305ced", size = 837039, upload-time = "2026-01-09T17:44:25.505Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/83/bd/9df897cbc98290bf71140104ee5b9777cf5291afb80333aa7da5a497339b/langchain_core-1.2.5-py3-none-any.whl", hash = "sha256:3255944ef4e21b2551facb319bfc426057a40247c0a05de5bd6f2fc021fbfa34", size = 484851, upload-time = "2025-12-22T23:45:30.525Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6e/6f/34a9fba14d191a67f7e2ee3dbce3e9b86d2fa7310e2c7f2c713583481bd2/langchain_core-1.2.7-py3-none-any.whl", hash = "sha256:452f4fef7a3d883357b22600788d37e3d8854ef29da345b7ac7099f33c31828b", size = 490232, upload-time = "2026-01-09T17:44:24.236Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-google-genai"
|
||||
version = "4.1.2"
|
||||
version = "4.1.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "filetype" },
|
||||
@@ -888,23 +891,23 @@ dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2e/04/c8d2840d96f05485abeb5288bd88ec8c5fb7a24065968201fa54969a47d8/langchain_google_genai-4.1.2.tar.gz", hash = "sha256:aa0dd7807a9a15651d10cd228c574f23fe46e2ce62921bf21d73a63869ecd814", size = 276143, upload-time = "2025-12-19T04:10:57.799Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ba/85/078d5aba488a82a53b8372ac1037dee4f64b020bac69e6a07e37a5059059/langchain_google_genai-4.1.3.tar.gz", hash = "sha256:28966c8fe58c9a401fdc37aeeeb0eb51744210803838ce050f022fc53d2f994e", size = 277024, upload-time = "2026-01-05T23:29:34.362Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/2f/a63dde25c9d11340d0f5f538a9fea77571b4b4e73294ad58fa6ea84079a0/langchain_google_genai-4.1.2-py3-none-any.whl", hash = "sha256:89790f2e3ca113f7e45883f541a834120d279e21f235fffc491c81cd1af11fdd", size = 65640, upload-time = "2025-12-19T04:10:56.386Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/aa/ca61dc2d202a23d7605a5c0ea24bd86a39a5c23c932a166b87c7797747c5/langchain_google_genai-4.1.3-py3-none-any.whl", hash = "sha256:5d710e2dcf449d49704bdbcd31729be90b386fa008395f9552a5c090241de1a5", size = 66262, upload-time = "2026-01-05T23:29:32.924Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-openai"
|
||||
version = "1.1.6"
|
||||
version = "1.1.7"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "openai" },
|
||||
{ name = "tiktoken" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ae/67/228dc28b4498ea16422577013b5bb4ba35a1b99f8be975d6747c7a9f7e6a/langchain_openai-1.1.6.tar.gz", hash = "sha256:e306612654330ae36fb6bbe36db91c98534312afade19e140c3061fe4208dac8", size = 1038310, upload-time = "2025-12-18T17:58:52.84Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/38/b7/30bfc4d1b658a9ee524bcce3b0b2ec9c45a11c853a13c4f0c9da9882784b/langchain_openai-1.1.7.tar.gz", hash = "sha256:f5ec31961ed24777548b63a5fe313548bc6e0eb9730d6552b8c6418765254c81", size = 1039134, upload-time = "2026-01-07T19:44:59.728Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/db/5b/1f6521df83c1a8e8d3f52351883b59683e179c0aa1bec75d0a77a394c9e7/langchain_openai-1.1.6-py3-none-any.whl", hash = "sha256:c42d04a67a85cee1d994afe400800d2b09ebf714721345f0b651eb06a02c3948", size = 84701, upload-time = "2025-12-18T17:58:51.527Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/64/a1/50e7596aca775d8c3883eceeaf47489fac26c57c1abe243c00174f715a8a/langchain_openai-1.1.7-py3-none-any.whl", hash = "sha256:34e9cd686aac1a120d6472804422792bf8080a2103b5d21ee450c9e42d053815", size = 84753, upload-time = "2026-01-07T19:44:58.629Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -926,12 +929,13 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langgraph-api"
|
||||
version = "0.6.16"
|
||||
version = "0.6.30"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "cloudpickle" },
|
||||
{ name = "cryptography" },
|
||||
{ name = "grpcio" },
|
||||
{ name = "grpcio-health-checking" },
|
||||
{ name = "grpcio-tools" },
|
||||
{ name = "httpx" },
|
||||
{ name = "jsonschema-rs" },
|
||||
@@ -956,9 +960,9 @@ dependencies = [
|
||||
{ name = "uvicorn" },
|
||||
{ name = "watchfiles" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a0/b1/f653aa84eecfa235df168653598fd1964e20679e5dbb05d5e1e91e3b378b/langgraph_api-0.6.16.tar.gz", hash = "sha256:8d6947ed306fc5b931fb056bafce046c61c0be13949d136039c13ee13a17b18d", size = 418282, upload-time = "2025-12-30T23:48:36.66Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/37/85/6e823fc43d7e8a7aa8885011ee7a3bbf4bd38a91527b7bd1d953ed8fa496/langgraph_api-0.6.30.tar.gz", hash = "sha256:e2601f3e57b0792e9cef077ce36d793cd9d5b808f9b87f57bb81a1c2c192ce0f", size = 435730, upload-time = "2026-01-10T08:38:22.057Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/6c/9b44c51b04ddb5744493906d85564311a662b74d87ab299a6d4c80de304f/langgraph_api-0.6.16-py3-none-any.whl", hash = "sha256:4b6169f49191fc428e54cd34a531f9d361da8d7d736d44ed1b979d1eca12565e", size = 324532, upload-time = "2025-12-30T23:48:35.209Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/71/aa21c9d15dbec86d153aebfa3099765ef5e81415545abc8a2b85afcb70f2/langgraph_api-0.6.30-py3-none-any.whl", hash = "sha256:7c97dff0369065606be7fed8d5f951d876d27e23af56bbed0dca5a4839f90267", size = 342253, upload-time = "2026-01-10T08:38:20.204Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1009,7 +1013,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langgraph-runtime-inmem"
|
||||
version = "0.20.1"
|
||||
version = "0.22.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "blockbuster" },
|
||||
@@ -1019,27 +1023,27 @@ dependencies = [
|
||||
{ name = "starlette" },
|
||||
{ name = "structlog" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/31/12/219cc68f58e69fe9a92518ae2563cbf20b118e1a6abb8827daafb26a83a2/langgraph_runtime_inmem-0.20.1.tar.gz", hash = "sha256:6ee918f0a839f3044cbed69580b44eb9cadf31a4771fc2e6d66842c7b7722fb0", size = 101516, upload-time = "2025-12-09T22:01:54.588Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/fc/4e/1658cfe871c2cd02013e97663cb64e734b531b3102cebbe50523f9f839ae/langgraph_runtime_inmem-0.22.0.tar.gz", hash = "sha256:8c50ccdfe2654a8524c3729d24f83705360c03b7d6a1c362584e0546abaeb32b", size = 103368, upload-time = "2026-01-08T02:03:28.315Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/4c/9c/b3883f862c46379bdd5fe4bd1eb8dce6c3897d7ec2d226b019e0866fad47/langgraph_runtime_inmem-0.20.1-py3-none-any.whl", hash = "sha256:27f4401372679c1132a31937524ca9fded071fc8e9a6cd5ac25e3860ebc8238b", size = 35183, upload-time = "2025-12-09T22:01:53.462Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/f9/09de2d2e09e122a93b4145487f1a4cd5923242ed4d3e3edfcea6fd6673cd/langgraph_runtime_inmem-0.22.0-py3-none-any.whl", hash = "sha256:46994bfebadc824e3b20374ed8ae151fa6da40eed3e43dd44c2a66d0185cb8ef", size = 37473, upload-time = "2026-01-08T02:03:27.372Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langgraph-sdk"
|
||||
version = "0.3.1"
|
||||
version = "0.3.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "httpx" },
|
||||
{ name = "orjson" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a9/d3/b6be0b0aba2a53a8920a2b0b4328a83121ec03eea9952e576d06a4182f6f/langgraph_sdk-0.3.1.tar.gz", hash = "sha256:f6dadfd2444eeff3e01405a9005c95fb3a028d4bd954ebec80ea6150084f92bb", size = 130312, upload-time = "2025-12-18T22:11:47.42Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/66/74/e1fa90bc6194f5409e37ce6d8174e45293b767fe7a11060a3c2a0e15c087/langgraph_sdk-0.3.2.tar.gz", hash = "sha256:3f2ed7b210c0748983b4596157ece9db2c5d6debd9d4878fad4683216a0b6fc4", size = 129502, upload-time = "2026-01-09T21:10:52.627Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/fe/0c1c9c01a154eba62b20b02fabe811fd94a2b810061ae9e4d8462b8cf85a/langgraph_sdk-0.3.1-py3-none-any.whl", hash = "sha256:0b856923bfd20bf3441ce9d03bef488aa333fb610e972618799a9d584436acad", size = 66517, upload-time = "2025-12-18T22:11:46.625Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/f3/253b46e87bc2cbbe69d71faec213465e01e9028bfa45d9b80a65b169b860/langgraph_sdk-0.3.2-py3-none-any.whl", hash = "sha256:0b0ab967eab59c20989d46f2020da439bd914ed8f4caf3326813be9b70d9037e", size = 66829, upload-time = "2026-01-09T21:10:51.739Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langsmith"
|
||||
version = "0.5.2"
|
||||
version = "0.6.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "httpx" },
|
||||
@@ -1051,50 +1055,50 @@ dependencies = [
|
||||
{ name = "uuid-utils" },
|
||||
{ name = "zstandard" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/43/7e/e38706c85dde3865c4eeadb044a19e6c7de4f40e2be0cc9100d2ff6e82aa/langsmith-0.5.2.tar.gz", hash = "sha256:a6186d555ba59732b1b10e2ba6fe34ee0b3c1bf3a7fb8d7be0dec367ac3b75f1", size = 883096, upload-time = "2025-12-30T13:41:36.132Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0c/8e/3ea7a8e9ce8c530204964207af7f7778597f5a548dc1a489c0c0940561f3/langsmith-0.6.2.tar.gz", hash = "sha256:c2efd7ed61eed3b6fdbf158ea2e9862bc2636f2edc95e90d2faad9462773d097", size = 1739277, upload-time = "2026-01-08T23:17:40.504Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ed/d8/91a8b483b30e0708a8911df10b4ce04ebf2b4b8dde8d020c124aec77380a/langsmith-0.5.2-py3-none-any.whl", hash = "sha256:42f8b853a18dd4d5f7fa38c8ff29e38da065a727022da410d91b3e13819aacc1", size = 283311, upload-time = "2025-12-30T13:41:33.915Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ed/e0/9d173dd2fa7f85d9ec4989f6f5a1a057d281daa8dada0ff8db0de0cb68aa/langsmith-0.6.2-py3-none-any.whl", hash = "sha256:1ea1a591f52683a5aeebdaa2b58458d72ce9598105dd8b29e16f7373631a6434", size = 282918, upload-time = "2026-01-08T23:17:38.858Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "librt"
|
||||
version = "0.7.5"
|
||||
version = "0.7.7"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b5/8a/071f6628363d83e803d4783e0cd24fb9c5b798164300fcfaaa47c30659c0/librt-0.7.5.tar.gz", hash = "sha256:de4221a1181fa9c8c4b5f35506ed6f298948f44003d84d2a8b9885d7e01e6cfa", size = 145868, upload-time = "2025-12-25T03:53:16.039Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b7/29/47f29026ca17f35cf299290292d5f8331f5077364974b7675a353179afa2/librt-0.7.7.tar.gz", hash = "sha256:81d957b069fed1890953c3b9c3895c7689960f233eea9a1d9607f71ce7f00b2c", size = 145910, upload-time = "2026-01-01T23:52:22.87Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9f/9a/8f61e16de0ff76590af893cfb5b1aa5fa8b13e5e54433d0809c7033f59ed/librt-0.7.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b1795c4b2789b458fa290059062c2f5a297ddb28c31e704d27e161386469691a", size = 55750, upload-time = "2025-12-25T03:52:26.975Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/7c/a8a883804851a066f301e0bad22b462260b965d5c9e7fe3c5de04e6f91f8/librt-0.7.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2fcbf2e135c11f721193aa5f42ba112bb1046afafbffd407cbc81d8d735c74d0", size = 57170, upload-time = "2025-12-25T03:52:27.948Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/5d/b3b47facf5945be294cf8a835b03589f70ee0e791522f99ec6782ed738b3/librt-0.7.5-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c039bbf79a9a2498404d1ae7e29a6c175e63678d7a54013a97397c40aee026c5", size = 165834, upload-time = "2025-12-25T03:52:29.09Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/b6/b26910cd0a4e43e5d02aacaaea0db0d2a52e87660dca08293067ee05601a/librt-0.7.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3919c9407faeeee35430ae135e3a78acd4ecaaaa73767529e2c15ca1d73ba325", size = 174820, upload-time = "2025-12-25T03:52:30.463Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/a3/81feddd345d4c869b7a693135a462ae275f964fcbbe793d01ea56a84c2ee/librt-0.7.5-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:26b46620e1e0e45af510d9848ea0915e7040605dd2ae94ebefb6c962cbb6f7ec", size = 189609, upload-time = "2025-12-25T03:52:31.492Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/a9/31310796ef4157d1d37648bf4a3b84555319f14cee3e9bad7bdd7bfd9a35/librt-0.7.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9bbb8facc5375476d392990dd6a71f97e4cb42e2ac66f32e860f6e47299d5e89", size = 184589, upload-time = "2025-12-25T03:52:32.59Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/32/22/da3900544cb0ac6ab7a2857850158a0a093b86f92b264aa6c4a4f2355ff3/librt-0.7.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e9e9c988b5ffde7be02180f864cbd17c0b0c1231c235748912ab2afa05789c25", size = 178251, upload-time = "2025-12-25T03:52:33.745Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/db/77/78e02609846e78b9b8c8e361753b3dbac9a07e6d5b567fe518de9e074ab0/librt-0.7.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:edf6b465306215b19dbe6c3fb63cf374a8f3e1ad77f3b4c16544b83033bbb67b", size = 199852, upload-time = "2025-12-25T03:52:34.826Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2a/25/05706f6b346429c951582f1b3561f4d5e1418d0d7ba1a0c181237cd77b3b/librt-0.7.5-cp313-cp313-win32.whl", hash = "sha256:060bde69c3604f694bd8ae21a780fe8be46bb3dbb863642e8dfc75c931ca8eee", size = 43250, upload-time = "2025-12-25T03:52:35.905Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/59/c38677278ac0b9ae1afc611382ef6c9ea87f52ad257bd3d8d65f0eacdc6a/librt-0.7.5-cp313-cp313-win_amd64.whl", hash = "sha256:a82d5a0ee43aeae2116d7292c77cc8038f4841830ade8aa922e098933b468b9e", size = 49421, upload-time = "2025-12-25T03:52:36.895Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c0/47/1d71113df4a81de5fdfbd3d7244e05d3d67e89f25455c3380ca50b92741e/librt-0.7.5-cp313-cp313-win_arm64.whl", hash = "sha256:3c98a8d0ac9e2a7cb8ff8c53e5d6e8d82bfb2839abf144fdeaaa832f2a12aa45", size = 42827, upload-time = "2025-12-25T03:52:37.856Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/97/ae/8635b4efdc784220f1378be640d8b1a794332f7f6ea81bb4859bf9d18aa7/librt-0.7.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:9937574e6d842f359b8585903d04f5b4ab62277a091a93e02058158074dc52f2", size = 55191, upload-time = "2025-12-25T03:52:38.839Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/52/11/ed7ef6955dc2032af37db9b0b31cd5486a138aa792e1bb9e64f0f4950e27/librt-0.7.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5cd3afd71e9bc146203b6c8141921e738364158d4aa7cdb9a874e2505163770f", size = 56894, upload-time = "2025-12-25T03:52:39.805Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/24/f1/02921d4a66a1b5dcd0493b89ce76e2762b98c459fe2ad04b67b2ea6fdd39/librt-0.7.5-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9cffa3ef0af29687455161cb446eff059bf27607f95163d6a37e27bcb37180f6", size = 163726, upload-time = "2025-12-25T03:52:40.79Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/65/87/27df46d2756fcb7a82fa7f6ca038a0c6064c3e93ba65b0b86fbf6a4f76a2/librt-0.7.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:82f3f088482e2229387eadf8215c03f7726d56f69cce8c0c40f0795aebc9b361", size = 172470, upload-time = "2025-12-25T03:52:42.226Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9f/a9/e65a35e5d423639f4f3d8e17301ff13cc41c2ff97677fe9c361c26dbfbb7/librt-0.7.5-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7aa33153a5bb0bac783d2c57885889b1162823384e8313d47800a0e10d0070e", size = 186807, upload-time = "2025-12-25T03:52:43.688Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d7/b0/ac68aa582a996b1241773bd419823290c42a13dc9f494704a12a17ddd7b6/librt-0.7.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:265729b551a2dd329cc47b323a182fb7961af42abf21e913c9dd7d3331b2f3c2", size = 181810, upload-time = "2025-12-25T03:52:45.095Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e1/c1/03f6717677f20acd2d690813ec2bbe12a2de305f32c61479c53f7b9413bc/librt-0.7.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:168e04663e126416ba712114050f413ac306759a1791d87b7c11d4428ba75760", size = 175599, upload-time = "2025-12-25T03:52:46.177Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/d7/f976ff4c07c59b69bb5eec7e5886d43243075bbef834428124b073471c86/librt-0.7.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:553dc58987d1d853adda8aeadf4db8e29749f0b11877afcc429a9ad892818ae2", size = 196506, upload-time = "2025-12-25T03:52:47.327Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/74/004f068b8888e61b454568b5479f88018fceb14e511ac0609cccee7dd227/librt-0.7.5-cp314-cp314-win32.whl", hash = "sha256:263f4fae9eba277513357c871275b18d14de93fd49bf5e43dc60a97b81ad5eb8", size = 39747, upload-time = "2025-12-25T03:52:48.437Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/37/b1/ea3ec8fcf5f0a00df21f08972af77ad799604a306db58587308067d27af8/librt-0.7.5-cp314-cp314-win_amd64.whl", hash = "sha256:85f485b7471571e99fab4f44eeb327dc0e1f814ada575f3fa85e698417d8a54e", size = 45970, upload-time = "2025-12-25T03:52:49.389Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/30/5e3fb7ac4614a50fc67e6954926137d50ebc27f36419c9963a94f931f649/librt-0.7.5-cp314-cp314-win_arm64.whl", hash = "sha256:49c596cd18e90e58b7caa4d7ca7606049c1802125fcff96b8af73fa5c3870e4d", size = 39075, upload-time = "2025-12-25T03:52:50.395Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a4/7f/0af0a9306a06c2aabee3a790f5aa560c50ec0a486ab818a572dd3db6c851/librt-0.7.5-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:54d2aef0b0f5056f130981ad45081b278602ff3657fe16c88529f5058038e802", size = 57375, upload-time = "2025-12-25T03:52:51.439Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/1f/c85e510baf6572a3d6ef40c742eacedc02973ed2acdb5dba2658751d9af8/librt-0.7.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0b4791202296ad51ac09a3ff58eb49d9da8e3a4009167a6d76ac418a974e5fd4", size = 59234, upload-time = "2025-12-25T03:52:52.687Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/b1/bb6535e4250cd18b88d6b18257575a0239fa1609ebba925f55f51ae08e8e/librt-0.7.5-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:6e860909fea75baef941ee6436e0453612505883b9d0d87924d4fda27865b9a2", size = 183873, upload-time = "2025-12-25T03:52:53.705Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8e/49/ad4a138cca46cdaa7f0e15fa912ce3ccb4cc0d4090bfeb8ccc35766fa6d5/librt-0.7.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f02c4337bf271c4f06637f5ff254fad2238c0b8e32a3a480ebb2fc5e26f754a5", size = 194609, upload-time = "2025-12-25T03:52:54.884Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9c/2d/3b3cb933092d94bb2c1d3c9b503d8775f08d806588c19a91ee4d1495c2a8/librt-0.7.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7f51ffe59f4556243d3cc82d827bde74765f594fa3ceb80ec4de0c13ccd3416", size = 206777, upload-time = "2025-12-25T03:52:55.969Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3a/52/6e7611d3d1347812233dabc44abca4c8065ee97b83c9790d7ecc3f782bc8/librt-0.7.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0b7f080ba30601dfa3e3deed3160352273e1b9bc92e652f51103c3e9298f7899", size = 203208, upload-time = "2025-12-25T03:52:57.036Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/27/aa/466ae4654bd2d45903fbf180815d41e3ae8903e5a1861f319f73c960a843/librt-0.7.5-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:fb565b4219abc8ea2402e61c7ba648a62903831059ed3564fa1245cc245d58d7", size = 196698, upload-time = "2025-12-25T03:52:58.481Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/97/8f/424f7e4525bb26fe0d3e984d1c0810ced95e53be4fd867ad5916776e18a3/librt-0.7.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a3cfb15961e7333ea6ef033dc574af75153b5c230d5ad25fbcd55198f21e0cf", size = 217194, upload-time = "2025-12-25T03:52:59.575Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9e/33/13a4cb798a171b173f3c94db23adaf13a417130e1493933dc0df0d7fb439/librt-0.7.5-cp314-cp314t-win32.whl", hash = "sha256:118716de5ad6726332db1801bc90fa6d94194cd2e07c1a7822cebf12c496714d", size = 40282, upload-time = "2025-12-25T03:53:01.091Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5f/f1/62b136301796399d65dad73b580f4509bcbd347dff885a450bff08e80cb6/librt-0.7.5-cp314-cp314t-win_amd64.whl", hash = "sha256:3dd58f7ce20360c6ce0c04f7bd9081c7f9c19fc6129a3c705d0c5a35439f201d", size = 46764, upload-time = "2025-12-25T03:53:02.381Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/cb/940431d9410fda74f941f5cd7f0e5a22c63be7b0c10fa98b2b7022b48cb1/librt-0.7.5-cp314-cp314t-win_arm64.whl", hash = "sha256:08153ea537609d11f774d2bfe84af39d50d5c9ca3a4d061d946e0c9d8bce04a1", size = 39728, upload-time = "2025-12-25T03:53:03.306Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8d/5e/d979ccb0a81407ec47c14ea68fb217ff4315521730033e1dd9faa4f3e2c1/librt-0.7.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f4a0b0a3c86ba9193a8e23bb18f100d647bf192390ae195d84dfa0a10fb6244", size = 55746, upload-time = "2026-01-01T23:51:29.828Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/2c/3b65861fb32f802c3783d6ac66fc5589564d07452a47a8cf9980d531cad3/librt-0.7.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5335890fea9f9e6c4fdf8683061b9ccdcbe47c6dc03ab8e9b68c10acf78be78d", size = 57174, upload-time = "2026-01-01T23:51:31.226Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/50/df/030b50614b29e443607220097ebaf438531ea218c7a9a3e21ea862a919cd/librt-0.7.7-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b4346b1225be26def3ccc6c965751c74868f0578cbcba293c8ae9168483d811", size = 165834, upload-time = "2026-01-01T23:51:32.278Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/e1/bd8d1eacacb24be26a47f157719553bbd1b3fe812c30dddf121c0436fd0b/librt-0.7.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a10b8eebdaca6e9fdbaf88b5aefc0e324b763a5f40b1266532590d5afb268a4c", size = 174819, upload-time = "2026-01-01T23:51:33.461Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/46/7d/91d6c3372acf54a019c1ad8da4c9ecf4fc27d039708880bf95f48dbe426a/librt-0.7.7-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:067be973d90d9e319e6eb4ee2a9b9307f0ecd648b8a9002fa237289a4a07a9e7", size = 189607, upload-time = "2026-01-01T23:51:34.604Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fa/ac/44604d6d3886f791fbd1c6ae12d5a782a8f4aca927484731979f5e92c200/librt-0.7.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:23d2299ed007812cccc1ecef018db7d922733382561230de1f3954db28433977", size = 184586, upload-time = "2026-01-01T23:51:35.845Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5c/26/d8a6e4c17117b7f9b83301319d9a9de862ae56b133efb4bad8b3aa0808c9/librt-0.7.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6b6f8ea465524aa4c7420c7cc4ca7d46fe00981de8debc67b1cc2e9957bb5b9d", size = 178251, upload-time = "2026-01-01T23:51:37.018Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/99/ab/98d857e254376f8e2f668e807daccc1f445e4b4fc2f6f9c1cc08866b0227/librt-0.7.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8df32a99cc46eb0ee90afd9ada113ae2cafe7e8d673686cf03ec53e49635439", size = 199853, upload-time = "2026-01-01T23:51:38.195Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7c/55/4523210d6ae5134a5da959900be43ad8bab2e4206687b6620befddb5b5fd/librt-0.7.7-cp313-cp313-win32.whl", hash = "sha256:86f86b3b785487c7760247bcdac0b11aa8bf13245a13ed05206286135877564b", size = 43247, upload-time = "2026-01-01T23:51:39.629Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/40/3ec0fed5e8e9297b1cf1a3836fb589d3de55f9930e3aba988d379e8ef67c/librt-0.7.7-cp313-cp313-win_amd64.whl", hash = "sha256:4862cb2c702b1f905c0503b72d9d4daf65a7fdf5a9e84560e563471e57a56949", size = 49419, upload-time = "2026-01-01T23:51:40.674Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/7a/aab5f0fb122822e2acbc776addf8b9abfb4944a9056c00c393e46e543177/librt-0.7.7-cp313-cp313-win_arm64.whl", hash = "sha256:0996c83b1cb43c00e8c87835a284f9057bc647abd42b5871e5f941d30010c832", size = 42828, upload-time = "2026-01-01T23:51:41.731Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/69/9c/228a5c1224bd23809a635490a162e9cbdc68d99f0eeb4a696f07886b8206/librt-0.7.7-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:23daa1ab0512bafdd677eb1bfc9611d8ffbe2e328895671e64cb34166bc1b8c8", size = 55188, upload-time = "2026-01-01T23:51:43.14Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ba/c2/0e7c6067e2b32a156308205e5728f4ed6478c501947e9142f525afbc6bd2/librt-0.7.7-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:558a9e5a6f3cc1e20b3168fb1dc802d0d8fa40731f6e9932dcc52bbcfbd37111", size = 56895, upload-time = "2026-01-01T23:51:44.534Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0e/77/de50ff70c80855eb79d1d74035ef06f664dd073fb7fb9d9fb4429651b8eb/librt-0.7.7-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2567cb48dc03e5b246927ab35cbb343376e24501260a9b5e30b8e255dca0d1d2", size = 163724, upload-time = "2026-01-01T23:51:45.571Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6e/19/f8e4bf537899bdef9e0bb9f0e4b18912c2d0f858ad02091b6019864c9a6d/librt-0.7.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6066c638cdf85ff92fc6f932d2d73c93a0e03492cdfa8778e6d58c489a3d7259", size = 172470, upload-time = "2026-01-01T23:51:46.823Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/42/4c/dcc575b69d99076768e8dd6141d9aecd4234cba7f0e09217937f52edb6ed/librt-0.7.7-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a609849aca463074c17de9cda173c276eb8fee9e441053529e7b9e249dc8b8ee", size = 186806, upload-time = "2026-01-01T23:51:48.009Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fe/f8/4094a2b7816c88de81239a83ede6e87f1138477d7ee956c30f136009eb29/librt-0.7.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:add4e0a000858fe9bb39ed55f31085506a5c38363e6eb4a1e5943a10c2bfc3d1", size = 181809, upload-time = "2026-01-01T23:51:49.35Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/ac/821b7c0ab1b5a6cd9aee7ace8309c91545a2607185101827f79122219a7e/librt-0.7.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:a3bfe73a32bd0bdb9a87d586b05a23c0a1729205d79df66dee65bb2e40d671ba", size = 175597, upload-time = "2026-01-01T23:51:50.636Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/71/f9/27f6bfbcc764805864c04211c6ed636fe1d58f57a7b68d1f4ae5ed74e0e0/librt-0.7.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0ecce0544d3db91a40f8b57ae26928c02130a997b540f908cefd4d279d6c5848", size = 196506, upload-time = "2026-01-01T23:51:52.535Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/46/ba/c9b9c6fc931dd7ea856c573174ccaf48714905b1a7499904db2552e3bbaf/librt-0.7.7-cp314-cp314-win32.whl", hash = "sha256:8f7a74cf3a80f0c3b0ec75b0c650b2f0a894a2cec57ef75f6f72c1e82cdac61d", size = 39747, upload-time = "2026-01-01T23:51:53.683Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c5/69/cd1269337c4cde3ee70176ee611ab0058aa42fc8ce5c9dce55f48facfcd8/librt-0.7.7-cp314-cp314-win_amd64.whl", hash = "sha256:3d1fe2e8df3268dd6734dba33ededae72ad5c3a859b9577bc00b715759c5aaab", size = 45971, upload-time = "2026-01-01T23:51:54.697Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/fd/e0844794423f5583108c5991313c15e2b400995f44f6ec6871f8aaf8243c/librt-0.7.7-cp314-cp314-win_arm64.whl", hash = "sha256:2987cf827011907d3dfd109f1be0d61e173d68b1270107bb0e89f2fca7f2ed6b", size = 39075, upload-time = "2026-01-01T23:51:55.726Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/42/02/211fd8f7c381e7b2a11d0fdfcd410f409e89967be2e705983f7c6342209a/librt-0.7.7-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8e92c8de62b40bfce91d5e12c6e8b15434da268979b1af1a6589463549d491e6", size = 57368, upload-time = "2026-01-01T23:51:56.706Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4c/b6/aca257affae73ece26041ae76032153266d110453173f67d7603058e708c/librt-0.7.7-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f683dcd49e2494a7535e30f779aa1ad6e3732a019d80abe1309ea91ccd3230e3", size = 59238, upload-time = "2026-01-01T23:51:58.066Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/96/47/7383a507d8e0c11c78ca34c9d36eab9000db5989d446a2f05dc40e76c64f/librt-0.7.7-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b15e5d17812d4d629ff576699954f74e2cc24a02a4fc401882dd94f81daba45", size = 183870, upload-time = "2026-01-01T23:51:59.204Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a4/b8/50f3d8eec8efdaf79443963624175c92cec0ba84827a66b7fcfa78598e51/librt-0.7.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c084841b879c4d9b9fa34e5d5263994f21aea7fd9c6add29194dbb41a6210536", size = 194608, upload-time = "2026-01-01T23:52:00.419Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/23/d9/1b6520793aadb59d891e3b98ee057a75de7f737e4a8b4b37fdbecb10d60f/librt-0.7.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c8fb9966f84737115513fecbaf257f9553d067a7dd45a69c2c7e5339e6a8dc", size = 206776, upload-time = "2026-01-01T23:52:01.705Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/db/331edc3bba929d2756fa335bfcf736f36eff4efcb4f2600b545a35c2ae58/librt-0.7.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9b5fb1ecb2c35362eab2dbd354fd1efa5a8440d3e73a68be11921042a0edc0ff", size = 203206, upload-time = "2026-01-01T23:52:03.315Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/e1/6af79ec77204e85f6f2294fc171a30a91bb0e35d78493532ed680f5d98be/librt-0.7.7-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:d1454899909d63cc9199a89fcc4f81bdd9004aef577d4ffc022e600c412d57f3", size = 196697, upload-time = "2026-01-01T23:52:04.857Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/46/de55ecce4b2796d6d243295c221082ca3a944dc2fb3a52dcc8660ce7727d/librt-0.7.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7ef28f2e7a016b29792fe0a2dd04dec75725b32a1264e390c366103f834a9c3a", size = 217193, upload-time = "2026-01-01T23:52:06.159Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/41/61/33063e271949787a2f8dd33c5260357e3d512a114fc82ca7890b65a76e2d/librt-0.7.7-cp314-cp314t-win32.whl", hash = "sha256:5e419e0db70991b6ba037b70c1d5bbe92b20ddf82f31ad01d77a347ed9781398", size = 40277, upload-time = "2026-01-01T23:52:07.625Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/06/21/1abd972349f83a696ea73159ac964e63e2d14086fdd9bc7ca878c25fced4/librt-0.7.7-cp314-cp314t-win_amd64.whl", hash = "sha256:d6b7d93657332c817b8d674ef6bf1ab7796b4f7ce05e420fd45bd258a72ac804", size = 46765, upload-time = "2026-01-01T23:52:08.647Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/0e/b756c7708143a63fca65a51ca07990fa647db2cc8fcd65177b9e96680255/librt-0.7.7-cp314-cp314t-win_arm64.whl", hash = "sha256:142c2cd91794b79fd0ce113bd658993b7ede0fe93057668c2f98a45ca00b7e91", size = 39724, upload-time = "2026-01-01T23:52:09.745Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1190,7 +1194,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "openai"
|
||||
version = "2.14.0"
|
||||
version = "2.15.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
@@ -1202,9 +1206,9 @@ dependencies = [
|
||||
{ name = "tqdm" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d8/b1/12fe1c196bea326261718eb037307c1c1fe1dedc2d2d4de777df822e6238/openai-2.14.0.tar.gz", hash = "sha256:419357bedde9402d23bf8f2ee372fca1985a73348debba94bddff06f19459952", size = 626938, upload-time = "2025-12-19T03:28:45.742Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/94/f4/4690ecb5d70023ce6bfcfeabfe717020f654bde59a775058ec6ac4692463/openai-2.15.0.tar.gz", hash = "sha256:42eb8cbb407d84770633f31bf727d4ffb4138711c670565a41663d9439174fba", size = 627383, upload-time = "2026-01-09T22:10:08.603Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/27/4b/7c1a00c2c3fbd004253937f7520f692a9650767aa73894d7a34f0d65d3f4/openai-2.14.0-py3-none-any.whl", hash = "sha256:7ea40aca4ffc4c4a776e77679021b47eec1160e341f42ae086ba949c9dcc9183", size = 1067558, upload-time = "2025-12-19T03:28:43.727Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b5/df/c306f7375d42bafb379934c2df4c2fa3964656c8c782bac75ee10c102818/openai-2.15.0-py3-none-any.whl", hash = "sha256:6ae23b932cd7230f7244e52954daa6602716d6b9bf235401a107af731baea6c3", size = 1067879, upload-time = "2026-01-09T22:10:06.446Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1376,11 +1380,11 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "pathspec"
|
||||
version = "0.12.1"
|
||||
version = "1.0.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/4c/b2/bb8e495d5262bfec41ab5cb18f522f1012933347fb5d9e62452d446baca2/pathspec-1.0.3.tar.gz", hash = "sha256:bac5cf97ae2c2876e2d25ebb15078eb04d76e4b98921ee31c6f85ade8b59444d", size = 130841, upload-time = "2026-01-09T15:46:46.009Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/32/2b/121e912bd60eebd623f873fd090de0e84f322972ab25a7f9044c056804ed/pathspec-1.0.3-py3-none-any.whl", hash = "sha256:e80767021c1cc524aa3fb14bedda9c34406591343cc42797b386ce7b9354fb6c", size = 55021, upload-time = "2026-01-09T15:46:44.652Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1418,17 +1422,17 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "protobuf"
|
||||
version = "6.33.2"
|
||||
version = "6.33.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/34/44/e49ecff446afeec9d1a66d6bbf9adc21e3c7cea7803a920ca3773379d4f6/protobuf-6.33.2.tar.gz", hash = "sha256:56dc370c91fbb8ac85bc13582c9e373569668a290aa2e66a590c2a0d35ddb9e4", size = 444296, upload-time = "2025-12-06T00:17:53.311Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/cc/5c/f912bdebdd4af4160da6a2c2b1b3aaa1b8c578d0243ba8f694f93c7095f0/protobuf-6.33.3.tar.gz", hash = "sha256:c8794debeb402963fddff41a595e1f649bcd76616ba56c835645cab4539e810e", size = 444318, upload-time = "2026-01-09T23:05:02.79Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/bc/91/1e3a34881a88697a7354ffd177e8746e97a722e5e8db101544b47e84afb1/protobuf-6.33.2-cp310-abi3-win32.whl", hash = "sha256:87eb388bd2d0f78febd8f4c8779c79247b26a5befad525008e49a6955787ff3d", size = 425603, upload-time = "2025-12-06T00:17:41.114Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/64/20/4d50191997e917ae13ad0a235c8b42d8c1ab9c3e6fd455ca16d416944355/protobuf-6.33.2-cp310-abi3-win_amd64.whl", hash = "sha256:fc2a0e8b05b180e5fc0dd1559fe8ebdae21a27e81ac77728fb6c42b12c7419b4", size = 436930, upload-time = "2025-12-06T00:17:43.278Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/ca/7e485da88ba45c920fb3f50ae78de29ab925d9e54ef0de678306abfbb497/protobuf-6.33.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d9b19771ca75935b3a4422957bc518b0cecb978b31d1dd12037b088f6bcc0e43", size = 427621, upload-time = "2025-12-06T00:17:44.445Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7d/4f/f743761e41d3b2b2566748eb76bbff2b43e14d5fcab694f494a16458b05f/protobuf-6.33.2-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:b5d3b5625192214066d99b2b605f5783483575656784de223f00a8d00754fc0e", size = 324460, upload-time = "2025-12-06T00:17:45.678Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b1/fa/26468d00a92824020f6f2090d827078c09c9c587e34cbfd2d0c7911221f8/protobuf-6.33.2-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8cd7640aee0b7828b6d03ae518b5b4806fdfc1afe8de82f79c3454f8aef29872", size = 339168, upload-time = "2025-12-06T00:17:46.813Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/56/13/333b8f421738f149d4fe5e49553bc2a2ab75235486259f689b4b91f96cec/protobuf-6.33.2-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:1f8017c48c07ec5859106533b682260ba3d7c5567b1ca1f24297ce03384d1b4f", size = 323270, upload-time = "2025-12-06T00:17:48.253Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0e/15/4f02896cc3df04fc465010a4c6a0cd89810f54617a32a70ef531ed75d61c/protobuf-6.33.2-py3-none-any.whl", hash = "sha256:7636aad9bb01768870266de5dc009de2d1b936771b38a793f73cbbf279c91c5c", size = 170501, upload-time = "2025-12-06T00:17:52.211Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2a/56/2a41b9dcc3b92fa672bb89610608f4fd4f71bec075d314956710503b29f5/protobuf-6.33.3-cp310-abi3-win32.whl", hash = "sha256:b4046f9f2ede57ad5b1d9917baafcbcad42f8151a73c755a1e2ec9557b0a764f", size = 425597, upload-time = "2026-01-09T23:04:50.11Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/23/07/1f1300fe7d204fd7aaabd9a0aafd54e6358de833b783f5bd161614e8e1e4/protobuf-6.33.3-cp310-abi3-win_amd64.whl", hash = "sha256:1fd18f030ae9df97712fbbb0849b6e54c63e3edd9b88d8c3bb4771f84d8db7a4", size = 436945, upload-time = "2026-01-09T23:04:51.921Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ec/5d/0ef28dded98973a26443a6a7bc49bff6206be8c57dc1d1e28e6c1147b879/protobuf-6.33.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:648b7b0144222eb06cf529a3d7b01333c5f30b4196773b682d388f04db373759", size = 427594, upload-time = "2026-01-09T23:04:53.358Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c5/46/551c69b6ff1957bd703654342bfb776bb97db400bc80afc56fbb64e7c11d/protobuf-6.33.3-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:08a6ca12f60ba99097dd3625ef4275280f99c9037990e47ce9368826b159b890", size = 324469, upload-time = "2026-01-09T23:04:54.332Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ca/6d/ade1cca06c64a421ee9745e082671465ead28164c809efaf2c15bc93f9a0/protobuf-6.33.3-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:642fce7187526c98683c79a3ad68e5d646a5ef5eb004582fe123fc9a33a9456b", size = 339242, upload-time = "2026-01-09T23:04:55.347Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/8c/6522b8e543ece46f645911c3cebe361d8460134c0fee02ddcf70ebf32999/protobuf-6.33.3-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:6fa9b5f4baa12257542273e5e6f3c3d3867b30bc2770c14ad9ac8315264bf986", size = 323298, upload-time = "2026-01-09T23:04:56.866Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/b9/067b8a843569d5605ba6f7c039b9319720a974f82216cd623e13186d3078/protobuf-6.33.3-py3-none-any.whl", hash = "sha256:c2bf221076b0d463551efa2e1319f08d4cffcc5f0d864614ccd3d0e77a637794", size = 170518, upload-time = "2026-01-09T23:05:01.227Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1820,28 +1824,28 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.14.10"
|
||||
version = "0.14.11"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/57/08/52232a877978dd8f9cf2aeddce3e611b40a63287dfca29b6b8da791f5e8d/ruff-0.14.10.tar.gz", hash = "sha256:9a2e830f075d1a42cd28420d7809ace390832a490ed0966fe373ba288e77aaf4", size = 5859763, upload-time = "2025-12-18T19:28:57.98Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d4/77/9a7fe084d268f8855d493e5031ea03fa0af8cc05887f638bf1c4e3363eb8/ruff-0.14.11.tar.gz", hash = "sha256:f6dc463bfa5c07a59b1ff2c3b9767373e541346ea105503b4c0369c520a66958", size = 5993417, upload-time = "2026-01-08T19:11:58.322Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/60/01/933704d69f3f05ee16ef11406b78881733c186fe14b6a46b05cfcaf6d3b2/ruff-0.14.10-py3-none-linux_armv6l.whl", hash = "sha256:7a3ce585f2ade3e1f29ec1b92df13e3da262178df8c8bdf876f48fa0e8316c49", size = 13527080, upload-time = "2025-12-18T19:29:25.642Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/df/58/a0349197a7dfa603ffb7f5b0470391efa79ddc327c1e29c4851e85b09cc5/ruff-0.14.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:674f9be9372907f7257c51f1d4fc902cb7cf014b9980152b802794317941f08f", size = 13797320, upload-time = "2025-12-18T19:29:02.571Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7b/82/36be59f00a6082e38c23536df4e71cdbc6af8d7c707eade97fcad5c98235/ruff-0.14.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d85713d522348837ef9df8efca33ccb8bd6fcfc86a2cde3ccb4bc9d28a18003d", size = 12918434, upload-time = "2025-12-18T19:28:51.202Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/00/45c62a7f7e34da92a25804f813ebe05c88aa9e0c25e5cb5a7d23dd7450e3/ruff-0.14.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6987ebe0501ae4f4308d7d24e2d0fe3d7a98430f5adfd0f1fead050a740a3a77", size = 13371961, upload-time = "2025-12-18T19:29:04.991Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/40/31/a5906d60f0405f7e57045a70f2d57084a93ca7425f22e1d66904769d1628/ruff-0.14.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:16a01dfb7b9e4eee556fbfd5392806b1b8550c9b4a9f6acd3dbe6812b193c70a", size = 13275629, upload-time = "2025-12-18T19:29:21.381Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3e/60/61c0087df21894cf9d928dc04bcd4fb10e8b2e8dca7b1a276ba2155b2002/ruff-0.14.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7165d31a925b7a294465fa81be8c12a0e9b60fb02bf177e79067c867e71f8b1f", size = 14029234, upload-time = "2025-12-18T19:29:00.132Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/44/84/77d911bee3b92348b6e5dab5a0c898d87084ea03ac5dc708f46d88407def/ruff-0.14.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c561695675b972effb0c0a45db233f2c816ff3da8dcfbe7dfc7eed625f218935", size = 15449890, upload-time = "2025-12-18T19:28:53.573Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e9/36/480206eaefa24a7ec321582dda580443a8f0671fdbf6b1c80e9c3e93a16a/ruff-0.14.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bb98fcbbc61725968893682fd4df8966a34611239c9fd07a1f6a07e7103d08e", size = 15123172, upload-time = "2025-12-18T19:29:23.453Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5c/38/68e414156015ba80cef5473d57919d27dfb62ec804b96180bafdeaf0e090/ruff-0.14.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f24b47993a9d8cb858429e97bdf8544c78029f09b520af615c1d261bf827001d", size = 14460260, upload-time = "2025-12-18T19:29:27.808Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b3/19/9e050c0dca8aba824d67cc0db69fb459c28d8cd3f6855b1405b3f29cc91d/ruff-0.14.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59aabd2e2c4fd614d2862e7939c34a532c04f1084476d6833dddef4afab87e9f", size = 14229978, upload-time = "2025-12-18T19:29:11.32Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/eb/e8dd1dd6e05b9e695aa9dd420f4577debdd0f87a5ff2fedda33c09e9be8c/ruff-0.14.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:213db2b2e44be8625002dbea33bb9c60c66ea2c07c084a00d55732689d697a7f", size = 14338036, upload-time = "2025-12-18T19:29:09.184Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6a/12/f3e3a505db7c19303b70af370d137795fcfec136d670d5de5391e295c134/ruff-0.14.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b914c40ab64865a17a9a5b67911d14df72346a634527240039eb3bd650e5979d", size = 13264051, upload-time = "2025-12-18T19:29:13.431Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/08/64/8c3a47eaccfef8ac20e0484e68e0772013eb85802f8a9f7603ca751eb166/ruff-0.14.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1484983559f026788e3a5c07c81ef7d1e97c1c78ed03041a18f75df104c45405", size = 13283998, upload-time = "2025-12-18T19:29:06.994Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/12/84/534a5506f4074e5cc0529e5cd96cfc01bb480e460c7edf5af70d2bcae55e/ruff-0.14.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c70427132db492d25f982fffc8d6c7535cc2fd2c83fc8888f05caaa248521e60", size = 13601891, upload-time = "2025-12-18T19:28:55.811Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0d/1e/14c916087d8598917dbad9b2921d340f7884824ad6e9c55de948a93b106d/ruff-0.14.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5bcf45b681e9f1ee6445d317ce1fa9d6cba9a6049542d1c3d5b5958986be8830", size = 14336660, upload-time = "2025-12-18T19:29:16.531Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/1c/d7b67ab43f30013b47c12b42d1acd354c195351a3f7a1d67f59e54227ede/ruff-0.14.10-py3-none-win32.whl", hash = "sha256:104c49fc7ab73f3f3a758039adea978869a918f31b73280db175b43a2d9b51d6", size = 13196187, upload-time = "2025-12-18T19:29:19.006Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/9c/896c862e13886fae2af961bef3e6312db9ebc6adc2b156fe95e615dee8c1/ruff-0.14.10-py3-none-win_amd64.whl", hash = "sha256:466297bd73638c6bdf06485683e812db1c00c7ac96d4ddd0294a338c62fdc154", size = 14661283, upload-time = "2025-12-18T19:29:30.16Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/74/31/b0e29d572670dca3674eeee78e418f20bdf97fa8aa9ea71380885e175ca0/ruff-0.14.10-py3-none-win_arm64.whl", hash = "sha256:e51d046cf6dda98a4633b8a8a771451107413b0f07183b2bef03f075599e44e6", size = 13729839, upload-time = "2025-12-18T19:28:48.636Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f0/a6/a4c40a5aaa7e331f245d2dc1ac8ece306681f52b636b40ef87c88b9f7afd/ruff-0.14.11-py3-none-linux_armv6l.whl", hash = "sha256:f6ff2d95cbd335841a7217bdfd9c1d2e44eac2c584197ab1385579d55ff8830e", size = 12951208, upload-time = "2026-01-08T19:12:09.218Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5c/5c/360a35cb7204b328b685d3129c08aca24765ff92b5a7efedbdd6c150d555/ruff-0.14.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f6eb5c1c8033680f4172ea9c8d3706c156223010b8b97b05e82c59bdc774ee6", size = 13330075, upload-time = "2026-01-08T19:12:02.549Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/9e/0cc2f1be7a7d33cae541824cf3f95b4ff40d03557b575912b5b70273c9ec/ruff-0.14.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f2fc34cc896f90080fca01259f96c566f74069a04b25b6205d55379d12a6855e", size = 12257809, upload-time = "2026-01-08T19:12:00.366Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/e5/5faab97c15bb75228d9f74637e775d26ac703cc2b4898564c01ab3637c02/ruff-0.14.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53386375001773ae812b43205d6064dae49ff0968774e6befe16a994fc233caa", size = 12678447, upload-time = "2026-01-08T19:12:13.899Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/33/e9767f60a2bef779fb5855cab0af76c488e0ce90f7bb7b8a45c8a2ba4178/ruff-0.14.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a697737dce1ca97a0a55b5ff0434ee7205943d4874d638fe3ae66166ff46edbe", size = 12758560, upload-time = "2026-01-08T19:11:42.55Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/84/4c6cf627a21462bb5102f7be2a320b084228ff26e105510cd2255ea868e5/ruff-0.14.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6845ca1da8ab81ab1dce755a32ad13f1db72e7fba27c486d5d90d65e04d17b8f", size = 13599296, upload-time = "2026-01-08T19:11:30.371Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/88/e1/92b5ed7ea66d849f6157e695dc23d5d6d982bd6aa8d077895652c38a7cae/ruff-0.14.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e36ce2fd31b54065ec6f76cb08d60159e1b32bdf08507862e32f47e6dde8bcbf", size = 15048981, upload-time = "2026-01-08T19:12:04.742Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/61/df/c1bd30992615ac17c2fb64b8a7376ca22c04a70555b5d05b8f717163cf9f/ruff-0.14.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590bcc0e2097ecf74e62a5c10a6b71f008ad82eb97b0a0079e85defe19fe74d9", size = 14633183, upload-time = "2026-01-08T19:11:40.069Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/04/e9/fe552902f25013dd28a5428a42347d9ad20c4b534834a325a28305747d64/ruff-0.14.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:53fe71125fc158210d57fe4da26e622c9c294022988d08d9347ec1cf782adafe", size = 14050453, upload-time = "2026-01-08T19:11:37.555Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ae/93/f36d89fa021543187f98991609ce6e47e24f35f008dfe1af01379d248a41/ruff-0.14.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a35c9da08562f1598ded8470fcfef2afb5cf881996e6c0a502ceb61f4bc9c8a3", size = 13757889, upload-time = "2026-01-08T19:12:07.094Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/9f/c7fb6ecf554f28709a6a1f2a7f74750d400979e8cd47ed29feeaa1bd4db8/ruff-0.14.11-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:0f3727189a52179393ecf92ec7057c2210203e6af2676f08d92140d3e1ee72c1", size = 13955832, upload-time = "2026-01-08T19:11:55.064Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/db/a0/153315310f250f76900a98278cf878c64dfb6d044e184491dd3289796734/ruff-0.14.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:eb09f849bd37147a789b85995ff734a6c4a095bed5fd1608c4f56afc3634cde2", size = 12586522, upload-time = "2026-01-08T19:11:35.356Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2f/2b/a73a2b6e6d2df1d74bf2b78098be1572191e54bec0e59e29382d13c3adc5/ruff-0.14.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:c61782543c1231bf71041461c1f28c64b961d457d0f238ac388e2ab173d7ecb7", size = 12724637, upload-time = "2026-01-08T19:11:47.796Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f0/41/09100590320394401cd3c48fc718a8ba71c7ddb1ffd07e0ad6576b3a3df2/ruff-0.14.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:82ff352ea68fb6766140381748e1f67f83c39860b6446966cff48a315c3e2491", size = 13145837, upload-time = "2026-01-08T19:11:32.87Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3b/d8/e035db859d1d3edf909381eb8ff3e89a672d6572e9454093538fe6f164b0/ruff-0.14.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:728e56879df4ca5b62a9dde2dd0eb0edda2a55160c0ea28c4025f18c03f86984", size = 13850469, upload-time = "2026-01-08T19:12:11.694Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4e/02/bb3ff8b6e6d02ce9e3740f4c17dfbbfb55f34c789c139e9cd91985f356c7/ruff-0.14.11-py3-none-win32.whl", hash = "sha256:337c5dd11f16ee52ae217757d9b82a26400be7efac883e9e852646f1557ed841", size = 12851094, upload-time = "2026-01-08T19:11:45.163Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/58/f1/90ddc533918d3a2ad628bc3044cdfc094949e6d4b929220c3f0eb8a1c998/ruff-0.14.11-py3-none-win_amd64.whl", hash = "sha256:f981cea63d08456b2c070e64b79cb62f951aa1305282974d4d5216e6e0178ae6", size = 14001379, upload-time = "2026-01-08T19:11:52.591Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/1c/1dbe51782c0e1e9cfce1d1004752672d2d4629ea46945d19d731ad772b3b/ruff-0.14.11-py3-none-win_arm64.whl", hash = "sha256:649fb6c9edd7f751db276ef42df1f3df41c38d67d199570ae2a7bd6cbc3590f0", size = 12938644, upload-time = "2026-01-08T19:11:50.027Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1916,14 +1920,14 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "starlette"
|
||||
version = "0.50.0"
|
||||
version = "0.51.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e7/65/5a1fadcc40c5fdc7df421a7506b79633af8f5d5e3a95c3e72acacec644b9/starlette-0.51.0.tar.gz", hash = "sha256:4c4fda9b1bc67f84037d3d14a5112e523509c369d9d47b111b2f984b0cc5ba6c", size = 2647658, upload-time = "2026-01-10T20:23:15.043Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/18/c4/09985a03dba389d4fe16a9014147a7b02fa76ef3519bf5846462a485876d/starlette-0.51.0-py3-none-any.whl", hash = "sha256:fb460a3d6fd3c958d729fdd96aee297f89a51b0181f16401fe8fd4cb6129165d", size = 74133, upload-time = "2026-01-10T20:23:13.445Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2070,33 +2074,33 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "urllib3"
|
||||
version = "2.6.2"
|
||||
version = "2.6.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1e/24/a2a2ed9addd907787d7aa0355ba36a6cadf1768b934c652ea78acbd59dcd/urllib3-2.6.2.tar.gz", hash = "sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797", size = 432930, upload-time = "2025-12-11T15:56:40.252Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd", size = 131182, upload-time = "2025-12-11T15:56:38.584Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uuid-utils"
|
||||
version = "0.12.0"
|
||||
version = "0.13.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/0b/0e/512fb221e4970c2f75ca9dae412d320b7d9ddc9f2b15e04ea8e44710396c/uuid_utils-0.12.0.tar.gz", hash = "sha256:252bd3d311b5d6b7f5dfce7a5857e27bb4458f222586bb439463231e5a9cbd64", size = 20889, upload-time = "2025-12-01T17:29:55.494Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/fe/8a/17b11768dcb473d3a255c02ffdd94fbd1b345c906efea0a39124dcbaed52/uuid_utils-0.13.0.tar.gz", hash = "sha256:4c17df6427a9e23a4cd7fb9ee1efb53b8abb078660b9bdb2524ca8595022dfe1", size = 21921, upload-time = "2026-01-08T15:48:10.841Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/43/de5cd49a57b6293b911b6a9a62fc03e55db9f964da7d5882d9edbee1e9d2/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:3b9b30707659292f207b98f294b0e081f6d77e1fbc760ba5b41331a39045f514", size = 603197, upload-time = "2025-12-01T17:29:30.104Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/fa/5fd1d8c9234e44f0c223910808cde0de43bb69f7df1349e49b1afa7f2baa/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:add3d820c7ec14ed37317375bea30249699c5d08ff4ae4dbee9fc9bce3bfbf65", size = 305168, upload-time = "2025-12-01T17:29:31.384Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/c6/8633ac9942bf9dc97a897b5154e5dcffa58816ec4dd780b3b12b559ff05c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8fce83ecb3b16af29c7809669056c4b6e7cc912cab8c6d07361645de12dd79", size = 340580, upload-time = "2025-12-01T17:29:32.362Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/88/8a61307b04b4da1c576373003e6d857a04dade52ab035151d62cb84d5cb5/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec921769afcb905035d785582b0791d02304a7850fbd6ce924c1a8976380dfc6", size = 346771, upload-time = "2025-12-01T17:29:33.708Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/fb/aab2dcf94b991e62aa167457c7825b9b01055b884b888af926562864398c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f3b060330f5899a92d5c723547dc6a95adef42433e9748f14c66859a7396664", size = 474781, upload-time = "2025-12-01T17:29:35.237Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/7a/dbd5e49c91d6c86dba57158bbfa0e559e1ddf377bb46dcfd58aea4f0d567/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:908dfef7f0bfcf98d406e5dc570c25d2f2473e49b376de41792b6e96c1d5d291", size = 343685, upload-time = "2025-12-01T17:29:36.677Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/19/8c4b1d9f450159733b8be421a4e1fb03533709b80ed3546800102d085572/uuid_utils-0.12.0-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4c6a24148926bd0ca63e8a2dabf4cc9dc329a62325b3ad6578ecd60fbf926506", size = 366482, upload-time = "2025-12-01T17:29:37.979Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/82/43/c79a6e45687647f80a159c8ba34346f287b065452cc419d07d2212d38420/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:64a91e632669f059ef605f1771d28490b1d310c26198e46f754e8846dddf12f4", size = 523132, upload-time = "2025-12-01T17:29:39.293Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/a2/b2d75a621260a40c438aa88593827dfea596d18316520a99e839f7a5fb9d/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:93c082212470bb4603ca3975916c205a9d7ef1443c0acde8fbd1e0f5b36673c7", size = 614218, upload-time = "2025-12-01T17:29:40.315Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/13/6b/ba071101626edd5a6dabf8525c9a1537ff3d885dbc210540574a03901fef/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:431b1fb7283ba974811b22abd365f2726f8f821ab33f0f715be389640e18d039", size = 546241, upload-time = "2025-12-01T17:29:41.656Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/12/9a942b81c0923268e6d85bf98d8f0a61fcbcd5e432fef94fdf4ce2ef8748/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2ffd7838c40149100299fa37cbd8bab5ee382372e8e65a148002a37d380df7c8", size = 511842, upload-time = "2025-12-01T17:29:43.107Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a9/a7/c326f5163dd48b79368b87d8a05f5da4668dd228a3f5ca9d79d5fee2fc40/uuid_utils-0.12.0-cp39-abi3-win32.whl", hash = "sha256:487f17c0fee6cbc1d8b90fe811874174a9b1b5683bf2251549e302906a50fed3", size = 179088, upload-time = "2025-12-01T17:29:44.492Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/92/41c8734dd97213ee1d5ae435cf4499705dc4f2751e3b957fd12376f61784/uuid_utils-0.12.0-cp39-abi3-win_amd64.whl", hash = "sha256:9598e7c9da40357ae8fffc5d6938b1a7017f09a1acbcc95e14af8c65d48c655a", size = 183003, upload-time = "2025-12-01T17:29:45.47Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/f9/52ab0359618987331a1f739af837d26168a4b16281c9c3ab46519940c628/uuid_utils-0.12.0-cp39-abi3-win_arm64.whl", hash = "sha256:c9bea7c5b2aa6f57937ebebeee4d4ef2baad10f86f1b97b58a3f6f34c14b4e84", size = 182975, upload-time = "2025-12-01T17:29:46.444Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/85/b8/d40848ca22781f206c60a1885fc737d2640392bd6b5792d455525accd89c/uuid_utils-0.13.0-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:83628283e977fb212e756bc055df8fdd2f9f589a2e539ba1abe755b8ce8df7a4", size = 602130, upload-time = "2026-01-08T15:47:34.877Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/40/b9/00a944b8096632ea12638181f8e294abcde3e3b8b5e29b777f809896f6ae/uuid_utils-0.13.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c47638ed6334ab19d80f73664f153b04bbb04ab8ce4298d10da6a292d4d21c47", size = 304213, upload-time = "2026-01-08T15:47:36.807Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/da/d7/07b36c33aef683b81c9afff3ec178d5eb39d325447a68c3c68a62e4abb32/uuid_utils-0.13.0-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:b276b538c57733ed406948584912da422a604313c71479654848b84b9e19c9b0", size = 340624, upload-time = "2026-01-08T15:47:38.821Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7d/55/fcff2fff02a27866cb1a6614c9df2b3ace721f0a0aab2b7b8f5a7d4e4221/uuid_utils-0.13.0-cp39-abi3-manylinux_2_24_armv7l.whl", hash = "sha256:bdaf2b77e34b199cf04cde28399495fd1ed951de214a4ece1f3919b2f945bb06", size = 346705, upload-time = "2026-01-08T15:47:40.397Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/41/48/67438506c2bb8bee1b4b00d7c0b3ff866401b4790849bf591d654d4ea0bc/uuid_utils-0.13.0-cp39-abi3-manylinux_2_24_i686.whl", hash = "sha256:eb2f0baf81e82f9769a7684022dca8f3bf801ca1574a3e94df1876e9d6f9271e", size = 366023, upload-time = "2026-01-08T15:47:42.662Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/d7/2d91ce17f62fd764d593430de296b70843cc25229c772453f7261de9e6a8/uuid_utils-0.13.0-cp39-abi3-manylinux_2_24_ppc64le.whl", hash = "sha256:6be6c4d11275f5cc402a4fdba6c2b1ce45fd3d99bb78716cd1cc2cbf6802b2ce", size = 471149, upload-time = "2026-01-08T15:47:44.963Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2e/9a/aa0756186073ba84daf5704c150d41ede10eb3185d510e02532e2071550e/uuid_utils-0.13.0-cp39-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:77621cf6ceca7f42173a642a01c01c216f9eaec3b7b65d093d2d6a433ca0a83d", size = 342130, upload-time = "2026-01-08T15:47:46.331Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/74/b4/3191789f4dc3bed59d79cec90559821756297a25d7dc34d1bf7781577a75/uuid_utils-0.13.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9a5a9eb06c2bb86dd876cd7b2fe927fc8543d14c90d971581db6ffda4a02526f", size = 524128, upload-time = "2026-01-08T15:47:47.628Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/30/29839210a8fff9fc219bfa7c8d8cd115324e92618cba0cda090d54d3d321/uuid_utils-0.13.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:775347c6110fb71360df17aac74132d8d47c1dbe71233ac98197fc872a791fd2", size = 615872, upload-time = "2026-01-08T15:47:50.61Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/99/ed/15000c96a8bd8f5fd8efd622109bf52549ea0b366f8ce71c45580fa55878/uuid_utils-0.13.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:cf95f6370ad1a0910ee7b5ad5228fd19c4ae32fe3627389006adaf519408c41e", size = 581023, upload-time = "2026-01-08T15:47:52.776Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/c8/3f809fa2dc2ca4bd331c792a3c7d3e45ae2b709d85847a12b8b27d1d5f19/uuid_utils-0.13.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5a88e23e0b2f4203fefe2ccbca5736ee06fcad10e61b5e7e39c8d7904bc13300", size = 546715, upload-time = "2026-01-08T15:47:54.415Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/80/4f7c7efd734d1494397c781bd3d421688e9c187ae836e3174625b1ddf8b0/uuid_utils-0.13.0-cp39-abi3-win32.whl", hash = "sha256:3e4f2cc54e6a99c0551158100ead528479ad2596847478cbad624977064ffce3", size = 177650, upload-time = "2026-01-08T15:47:55.679Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/94/d05ab68622e66ad787a241dfe5ccc649b3af09f30eae977b9ee8f7046aaa/uuid_utils-0.13.0-cp39-abi3-win_amd64.whl", hash = "sha256:046cb2756e1597b3de22d24851b769913e192135830486a0a70bf41327f0360c", size = 183211, upload-time = "2026-01-08T15:47:57.604Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/69/37/674b3ce25cd715b831ea8ebbd828b74c40159f04c95d1bb963b2c876fe79/uuid_utils-0.13.0-cp39-abi3-win_arm64.whl", hash = "sha256:5447a680df6ef8a5a353976aaf4c97cc3a3a22b1ee13671c44227b921e3ae2a9", size = 183518, upload-time = "2026-01-08T15:47:59.148Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
Reference in New Issue
Block a user