{
"version": "2.0",
"updated": "2026-03-28T13:00:00Z",
"total_capabilities": 42,
"core_services": {
"chat": {
"endpoint": "/api/weval-ia-fast.php",
"providers": [
"Groq",
"Cerebras",
"SambaNova",
"weval-brain",
"qwen3:8b",
"qwen3:4b"
],
"status": "live"
},
"chat_deep": {
"endpoint": "/api/weval-ia-full",
"providers": [
"Groq",
"Cerebras",
"SambaNova",
"Ollama"
],
"status": "live"
},
"tts": {
"endpoint": "/wevia-ia/wevia-tts.php",
"engine": "edge-tts",
"voices": {
"fr": "DeniseNeural",
"en": "AvaNeural",
"ar": "MounaNeural",
"darija": "MounaNeural",
"es": "ElviraNeural",
"de": "KatjaNeural",
"ja": "NanamiNeural",
"zh": "XiaoxiaoNeural",
"it": "ElsaNeural",
"pt": "FranciscaNeural"
},
"status": "live"
},
"mermaid": {
"renderer": "mermaid.js 10.x",
"output": "inline SVG",
"status": "live"
},
"logo_svg": {
"engine": "qwen3:8b local + Groq fallback",
"output": "artifact card + preview",
"status": "live"
},
"pdf_gen": {
"endpoint": "/api/weval-ia-full",
"engine": "opusPdfGenerate",
"status": "live"
},
"image_gen": {
"endpoint": "/api/weval-ia-fast.php",
"engine": "picsum seed",
"status": "live"
}
},
"auto_features": {
"mode_auto": {
"description": "Auto fast/deep routing by query complexity",
"function": "detectComplexity()",
"status": "live"
},
"lang_auto": {
"description": "Auto language detection by intent",
"function": "detectLang()",
"languages": [
"fr",
"en",
"darija",
"ar",
"es",
"de",
"it",
"pt",
"zh",
"ja",
"ko",
"ru",
"tr",
"hi"
],
"status": "live"
},
"no_lang_screen": true
},
"security": {
"nuclei": {
"endpoint": "/api/nuclei-scanner.php",
"version": "v3.3.7",
"status": "live"
},
"coderabbit": {
"endpoint": "/api/coderabbit-webhook.php",
"github": "Yacineutt",
"repos": 4,
"mattermost": "wired",
"status": "live"
},
"auth": {
"url": "https://auth.weval-consulting.com",
"status": "live"
}
},
"ai_models": {
"cloud": [
{
"name": "Groq llama-3.3-70b",
"role": "primary",
"speed": "2s"
},
{
"name": "Cerebras Qwen-235B",
"role": "secondary",
"speed": "3s"
},
{
"name": "SambaNova Llama-3.3-70B",
"role": "tertiary",
"speed": "5s"
}
],
"local_s204": [
{
"name": "weval-brain (qwen2 7.6B)",
"role": "sovereign fallback",
"size": "4.7GB"
},
{
"name": "qwen3:8b",
"role": "logo SVG gen",
"size": "5.2GB"
},
{
"name": "qwen3:4b",
"role": "fast fallback",
"size": "2.5GB"
},
{
"name": "glm4:9b",
"role": "multilingual",
"size": "5.5GB"
},
{
"name": "meditron:7b",
"role": "Ethica HCP",
"size": "3.8GB"
},
{
"name": "medllama2",
"role": "clinical Q&A",
"size": "3.8GB"
},
{
"name": "mistral",
"role": "legacy",
"size": "4.4GB"
},
{
"name": "granite4",
"role": "enterprise",
"size": "2.1GB"
},
{
"name": "qwen2.5:7b",
"role": "general",
"size": "4.7GB"
},
{
"name": "qwen3.5:0.8b",
"role": "ultra-fast",
"size": "1.0GB"
}
]
},
"tools_wired": {
"toolfk": {
"token": "configured",
"tools": 12,
"status": "token_ready"
},
"coderabbit": {
"status": "connected",
"repos": 4
},
"nuclei": {
"status": "ready"
},
"hermes_skills": {
"total": 27,
"categories": [
"diagramming",
"creative",
"media",
"research",
"software-development",
"productivity",
"mcp"
]
},
"deerflow": {
"version": "2.0",
"skills": 42,
"status": "live"
},
"kilo_cli": {
"version": "7.1.0",
"models": "500+",
"status": "installed"
}
},
"prompts": {
"master_prompt": "/var/www/weval/wevia-ia/prompts/opus-master-system.md",
"prompt_library": {
"total": 62,
"path": "/var/www/weval/wevia-ia/prompts/prompt-library.json"
},
"system_prompts": 24,
"agent_prompts": 10
},
"_refreshed": "2026-04-07T03:45:23.701947"
}