fix: .env symlink + stale cleanup
This commit is contained in:
183
config.yaml.bak-30mars
Normal file
183
config.yaml.bak-30mars
Normal file
@@ -0,0 +1,183 @@
|
||||
---
# DeerFlow runtime configuration (reconstructed from mangled scrape).
# Nesting restored from the alphabetical key order of the original dump.
# NOTE(review): indentation was lost in extraction; nesting below is the
# inferred structure — confirm against a pristine copy of config.yaml.

checkpointer:
  connection_string: checkpoints.db
  type: sqlite

config_version: 3

guardrails:
  enabled: true
  fail_closed: false
  provider:
    config:
      strict_mode: false
    use: deerflow.guardrails.security_monitor:SecurityMonitorProvider

log_level: info

memory:
  debounce_seconds: 30
  enabled: true
  fact_confidence_threshold: 0.7
  injection_enabled: true
  max_facts: 100
  max_injection_tokens: 2000
  model_name: null
  storage_path: memory.json

# Local (Ollama) models first, cloud fallbacks last. Model tags containing
# ':' and '$ENV_VAR' references are quoted defensively so generic YAML
# tooling never mis-parses them; quoting does not change the parsed value.
models:
  - api_key: ollama
    base_url: http://localhost:11434/v1
    description: Modèle WEVAL fine-tuné souverain — zero cloud
    display_name: WEVAL Brain (Souverain)
    max_tokens: 4096
    model: "weval-brain:latest"
    name: weval-brain
    use: langchain_openai:ChatOpenAI
  - api_key: ollama
    base_url: http://localhost:11434/v1
    description: Qwen3 8B local souverain — zero cloud
    display_name: Qwen3 8B (Souverain)
    max_tokens: 4096
    model: "qwen3:8b"
    name: qwen3-8b-sovereign
    use: langchain_openai:ChatOpenAI
  - api_key: ollama
    base_url: http://localhost:11434/v1
    description: Qwen 2.5 7B local souverain — zero cloud
    display_name: Qwen 2.5 7B (Souverain)
    max_tokens: 4096
    model: "qwen2.5:7b"
    name: qwen2.5-7b-sovereign
    use: langchain_openai:ChatOpenAI
  - api_key: ollama
    base_url: http://localhost:11434/v1
    description: Mistral 7B local souverain — zero cloud
    display_name: Mistral 7B (Souverain)
    max_tokens: 4096
    model: "mistral:latest"
    name: mistral-sovereign
    use: langchain_openai:ChatOpenAI
  - api_key: ollama
    base_url: http://localhost:11434/v1
    description: IBM Granite 4 local souverain — zero cloud
    display_name: Granite 4 (Souverain)
    max_tokens: 4096
    model: "granite4:latest"
    name: granite4-sovereign
    use: langchain_openai:ChatOpenAI
  - api_key: ollama
    base_url: http://localhost:11434/v1
    description: GLM4 9B local souverain — zero cloud
    display_name: GLM4 9B (Souverain)
    max_tokens: 4096
    model: "glm4:9b"
    name: glm4-9b-sovereign
    use: langchain_openai:ChatOpenAI
  - api_key: ollama
    base_url: http://localhost:11434/v1
    description: Qwen3 4B local souverain léger — zero cloud
    display_name: Qwen3 4B (Souverain)
    max_tokens: 4096
    model: "qwen3:4b"
    name: qwen3-4b-sovereign
    use: langchain_openai:ChatOpenAI
  - api_key: ollama
    base_url: http://localhost:11434/v1
    description: MedLlama2 médical local souverain — zero cloud
    display_name: MedLlama2 (Souverain Médical)
    max_tokens: 4096
    model: "medllama2:latest"
    name: medllama2-sovereign
    use: langchain_openai:ChatOpenAI
  - api_key: ollama
    base_url: http://localhost:11434/v1
    description: Meditron 7B médical local souverain — zero cloud
    display_name: Meditron 7B (Souverain Médical)
    max_tokens: 4096
    model: "meditron:7b"
    name: meditron-sovereign
    use: langchain_openai:ChatOpenAI
  - api_key: ollama
    base_url: http://localhost:11434/v1
    description: Qwen3.5 0.8B ultra-léger souverain — zero cloud
    display_name: Qwen3.5 0.8B (Ultra-Léger Souverain)
    max_tokens: 2048
    model: "qwen3.5:0.8b"
    name: qwen3.5-0.8b-sovereign
    use: langchain_openai:ChatOpenAI
  - api_key: "$SAMBANOVA_API_KEY"
    base_url: https://api.sambanova.ai/v1
    description: Fast inference via SambaNova
    display_name: SambaNova Llama 70B
    max_tokens: 4096
    model: Meta-Llama-3.3-70B-Instruct
    name: sambanova-llama70b
    use: langchain_openai:ChatOpenAI
  - api_key: "$GROQ_API_KEY"
    base_url: https://api.groq.com/openai/v1
    description: Fast inference via Groq
    display_name: Groq Llama 70B
    max_tokens: 4096
    model: llama-3.3-70b-versatile
    name: groq-llama70b
    use: langchain_openai:ChatOpenAI
  - api_key: "$CEREBRAS_API_KEY"
    base_url: https://api.cerebras.ai/v1
    description: High quality via Cerebras
    display_name: Cerebras Qwen 235B
    max_tokens: 4096
    model: qwen-3-235b-a22b-instruct-2507
    name: cerebras-qwen
    use: langchain_openai:ChatOpenAI
  - api_key: "$ALIBABA_API_KEY"
    base_url: https://dashscope-intl.aliyuncs.com/compatible-mode/v1
    description: Alibaba Qwen Plus - fast, unlimited, sovereign fallback
    display_name: Alibaba Qwen Plus
    max_tokens: 8192
    model: qwen-plus
    name: alibaba-qwen-plus
    use: langchain_openai:ChatOpenAI

sandbox:
  use: deerflow.sandbox.local:LocalSandboxProvider

skills:
  container_path: /mnt/skills

summarization:
  model: alibaba-qwen-plus

title:
  enabled: true
  model: alibaba-qwen-plus

token_usage:
  enabled: false

tool_groups:
  - name: web
  - name: file:read
  - name: file:write
  - name: bash

tool_search:
  enabled: false

tools:
  - group: web
    max_results: 5
    name: web_search
    use: deerflow.community.searxng_search.tools:web_search_tool
  - group: web
    name: web_fetch
    timeout: 10
    use: deerflow.community.jina_ai.tools:web_fetch_tool
  - group: web
    max_results: 5
    name: image_search
    use: deerflow.community.image_search.tools:image_search_tool
  - group: file:read
    name: ls
    use: deerflow.sandbox.tools:ls_tool
  - group: file:read
    name: read_file
    use: deerflow.sandbox.tools:read_file_tool
  - group: file:write
    name: write_file
    use: deerflow.sandbox.tools:write_file_tool
  - group: file:write
    name: str_replace
    use: deerflow.sandbox.tools:str_replace_tool
  - group: bash
    name: bash
    use: deerflow.sandbox.tools:bash_tool
  - group: web
    name: notify_team
    use: deerflow.community.mattermost_notify.tools:notify_team_tool
|
||||
@@ -198,6 +198,33 @@ echo " - Gateway: logs/gateway.log"
|
||||
echo " - Frontend: logs/frontend.log"
|
||||
echo " - Nginx: logs/nginx.log"
|
||||
echo ""
|
||||
|
||||
echo "Press Ctrl+C to stop all services"
|
||||
|
||||
wait
|
||||
# ── Health monitoring loop ─────────────────────────────────────────────
|
||||
# Keeps serve.sh alive even if a sub-process crashes and restarts it.
|
||||
# This prevents systemd from seeing a non-zero exit and triggering a full
|
||||
# service restart cycle.
|
||||
|
||||
set +e # Allow sub-process failures without killing serve.sh
|
||||
|
||||
while true; do
|
||||
sleep 30
|
||||
|
||||
# Check LangGraph (port 2024)
|
||||
if ! ss -tlnp 2>/dev/null | grep -q ':2024 '; then
|
||||
echo "$(date) [MONITOR] LangGraph died, restarting..."
|
||||
(cd backend && NO_COLOR=1 uv run langgraph dev --no-browser --allow-blocking \
|
||||
--server-log-level info --no-reload >> ../logs/langgraph.log 2>&1) &
|
||||
./scripts/wait-for-port.sh 2024 30 "LangGraph" >> /dev/null 2>&1 || true
|
||||
fi
|
||||
|
||||
# Check Gateway (port 8001)
|
||||
if ! ss -tlnp 2>/dev/null | grep -q ':8001 '; then
|
||||
echo "$(date) [MONITOR] Gateway died, restarting..."
|
||||
(cd backend && PYTHONPATH=. uv run uvicorn app.gateway.app:app \
|
||||
--host 0.0.0.0 --port 8001 >> ../logs/gateway.log 2>&1) &
|
||||
./scripts/wait-for-port.sh 8001 20 "Gateway" >> /dev/null 2>&1 || true
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
40
skills/weval/claude-code-patterns/SKILL.md
Normal file
40
skills/weval/claude-code-patterns/SKILL.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Piebald-AI/claude-code-system-prompts

## Source
- GitHub: https://github.com/Piebald-AI/claude-code-system-prompts
- Stars: 6,400+
- Language: JavaScript
- License: MIT
- Updated: Every Claude Code release (v2.1.87, 28 mars 2026)

## Description
110+ system prompts extracted from Claude Code. Contains sub-agent architectures (Plan/Explore/Task/Verify), tool descriptions, security monitors, conversation compaction, memory consolidation, and skill creation patterns.

## WEVAL Relevance
- Score: 65
- Matched needs: prompt_eng, skill_agent, verification, security
- Source: Facebook veille - Tex He Yit post

## Key Patterns Extracted for WEVAL
1. **Verification Specialist** - Adversarial testing agent (IMPLEMENTED in DeerFlow verifier subagent)
2. **Security Monitor** - Block/allow rules for autonomous actions (IMPLEMENTED in DeerFlow guardrails)
3. **Anti-rationalization rules** - "Reading code is NOT verification. RUN IT." (IMPLEMENTED in DeerFlow prompt)
4. **Conversation Compaction** - Context management for long sessions
5. **Dream Memory Consolidation** - Multi-phase memory merging
6. **Worker Fork Execution** - Parallel task execution pattern

## Integration
- Status: INTEGRATED (30 mars 2026)
- Target: verifier_agent + prompt_library
- Server: S204
- Files modified: prompt.py, verifier_agent.py, security_monitor.py

## Triggers
- verification
- prompt_eng
- skill_agent
- security

---
Auto-discovered: 2026-03-30 via Facebook veille
Integrated: 2026-03-30 by Ambre/Claude
|
||||
36
skills/weval/competitive-prompts/SKILL.md
Normal file
36
skills/weval/competitive-prompts/SKILL.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# x1xhlol/system-prompts-and-models-of-ai-tools

## Source
- GitHub: https://github.com/x1xhlol/system-prompts-and-models-of-ai-tools
- Stars: 8,000+
- Language: Markdown
- License: MIT

## Description
Complete system prompts from 25+ AI tools: Cursor, Devin, Lovable, Windsurf, Replit, Manus, v0, Claude Code, Kiro, Augment Code, etc. Competitive intelligence for prompt architecture.

## WEVAL Relevance
- Score: 55
- Matched needs: prompt_eng, skill_agent, security
- Source: Facebook veille

## Key Intel for WEVAL
1. **Cursor** - How it handles multi-file edits and codebase understanding
2. **Devin** - Full autonomous agent architecture
3. **Lovable** - UI generation from description patterns
4. **Windsurf** - IDE integration approach
5. **Replit** - Sandbox execution patterns

## Integration
- Status: INTEGRATED (30 mars 2026)
- Target: prompt_library (competitive reference)
- Server: S204
- Path: /opt/wevads/vault/prompt-patterns/

## Triggers
- prompt_eng
- skill_agent
- security

---
Auto-discovered: 2026-03-30 via Facebook veille
|
||||
3
thread-cleanup.sh
Executable file
3
thread-cleanup.sh
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/bin/bash
# Remove DeerFlow per-thread directories older than 30 days, then log the run.
# Intended to be invoked from cron/systemd-timer (logs to the watchdog log).

THREADS_DIR="/opt/deer-flow/backend/.deer-flow/threads"
LOG_FILE="/opt/deer-flow/logs/watchdog.log"

# Guard against a missing threads dir instead of blanket-suppressing all
# find errors: the original `2>/dev/null` also hid real failures (e.g.
# permission errors on rm). -mindepth/-maxdepth 1 delete only the
# per-thread directories themselves, never the parent; -mtime +30 matches
# directories not modified in the last 30 days.
if [ -d "$THREADS_DIR" ]; then
    find "$THREADS_DIR" -mindepth 1 -maxdepth 1 -type d -mtime +30 -exec rm -rf {} +
fi
echo "$(date) DeerFlow thread cleanup" >> "$LOG_FILE"
|
||||
Reference in New Issue
Block a user