<?php

/**
 * WEVAL AI Chat Proxy v3 — 18 providers + KB + Git + Tools + Ethica
 * Fixes: shorter local prompts, GLM-5, better timeout, /no_think for qwen3
 */

require_once __DIR__ . '/weval-brand-guard.php';

// Load KEY=VALUE pairs from the environment-style secrets file.
// Named flag constants replace the opaque "2|4" literal.
$secrets = [];
foreach (file('/etc/weval/secrets.env', FILE_IGNORE_NEW_LINES | FILE_SKIP_EMPTY_LINES) as $line) {
    if (str_contains($line, '=')) {
        [$k, $v] = explode('=', $line, 2);
        // Trim whitespace and optional single/double quoting around the value.
        $secrets[trim($k)] = trim($v, " \t\"'");
    }
}

require_once __DIR__ . '/_secrets.php';

header("Content-Type: application/json");
header("Access-Control-Allow-Origin: *");
header("Access-Control-Allow-Headers: Content-Type");
set_time_limit(180); // LLM round-trips can be slow; allow up to 3 minutes.

// CORS preflight: reply with only the headers above, no body.
if ($_SERVER["REQUEST_METHOD"] === "OPTIONS") exit;
// Decode the JSON body posted by the chat front-end.
$body  = file_get_contents("php://input");
$input = json_decode($body, true);

$msg      = trim($input["message"] ?? "");
$provider = $input["provider"] ?? "cerebras"; // DOCTRINE 141: alibaba arrearage -> cerebras free fallback
$model    = $input["model"] ?? "mistral";
$useKB    = $input["kb"] ?? true;
$useGit   = $input["git"] ?? false;
$useTool  = $input["tools"] ?? false;

// A missing/empty message is a hard error: answer with JSON and stop.
if (!$msg) {
    echo json_encode(["error" => "no message"]);
    exit;
}
// ═══ FETCH KB CONTEXT ═══
// Extract keywords from the user message and grep the knowledge base for them.
$kb_context = "";
if ($useKB) {
    $stopwords = ["what","when","where","this","that","with","from","have","does","will","pour","dans","avec","quel","comment","quoi","tout","plus","comme","aussi","sont","peut","faire","bonjour","hello","merci","thanks"];
    // Keep only letters/digits/accented chars and whitespace, lowercased.
    $clean = preg_replace('/[^a-zA-Z0-9\sàâäéèêëïîôùûüÿçœæ]/u', '', mb_strtolower($msg, 'UTF-8'));
    $words = array_filter(explode(" ", $clean), function ($w) use ($stopwords) {
        // mb_strlen: accented words were previously measured in bytes (strlen),
        // which over-counted them; strict in_array avoids loose comparisons.
        return mb_strlen($w, 'UTF-8') > 3 && !in_array($w, $stopwords, true);
    });
    $kw = implode("|", $words);
    if ($kw) {
        // SECURITY: the pattern derives from user input — never splice it raw
        // into a shell line; escapeshellarg() quotes it safely.
        $pat = escapeshellarg($kw);
        $kb_raw = shell_exec("grep -rilE $pat /var/www/weval/wevia-ia/kb-data/ 2>/dev/null | head -5 | while read f; do echo '--- KB ---'; head -15 \"\$f\" | tr -cd '[:print:][:space:]'; echo; done | head -c 1500");
        $kb_md = shell_exec("grep -rilE $pat /var/www/weval/wevia-ia/kb_*.md 2>/dev/null | head -3 | while read f; do echo '--- KB ---'; head -10 \"\$f\" | tr -cd '[:print:][:space:]'; echo; done | head -c 800");
        $kb_text = trim(($kb_raw ?? "") . ($kb_md ?? ""));
        // Only attach when grep actually found something non-trivial.
        if ($kb_text && strlen($kb_text) > 20) {
            $n = substr_count($kb_text, "--- KB");
            $kb_context = "\n[KB $n entries]\n$kb_text\n[/KB]\n";
        }
    }
}
// ═══ FETCH GIT CONTEXT ═══
// Summarise repository state (branch, last commits, dirty files) for the prompt.
$git_context = "";
$git_env = "GIT_CONFIG_COUNT=1 GIT_CONFIG_KEY_0=safe.directory GIT_CONFIG_VALUE_0=/var/www/html";
if ($useGit) {
    // Small helper: run one git subcommand against the site checkout.
    $git = function (string $args) use ($git_env) {
        return trim(shell_exec("$git_env git -C /var/www/html $args"));
    };
    $gl = $git("log --oneline -5 2>/dev/null");
    $gs = $git("status --short 2>/dev/null | head -8");
    $gb = $git("branch --show-current 2>/dev/null");
    // Only build the section when the log command produced output.
    if ($gl) {
        $parts = ["\n[GIT $gb]\n$gl\n"];
        if ($gs) {
            $parts[] = "Modified: $gs\n";
        }
        $parts[] = "[/GIT]\n";
        $git_context = implode("", $parts);
    }
}
// ═══ FETCH TOOL CONTEXT ═══
// Live infrastructure snapshot: uptime, disk usage, Docker count, Ollama models.
$tool_context = "";
if ($useTool) {
    $uptime = trim(shell_exec("uptime -p 2>/dev/null"));
    $disk = trim(shell_exec("df -h / 2>/dev/null | awk 'NR==2{print \$5}'"));
    $docker = trim(shell_exec("sudo docker ps -q 2>/dev/null | wc -l"));
    // Ask the local Ollama daemon for its model list (comma-joined, capped at 200 chars).
    $ollama = trim(shell_exec("curl -s http://localhost:11434/api/tags 2>/dev/null | python3 -c \"import sys,json;print(','.join([m['name'] for m in json.load(sys.stdin).get('models',[])])[:200])\" 2>/dev/null"));
    // FIX: "${docker}" interpolation is deprecated since PHP 8.2 — use "{$docker}".
    $tool_context = "\n[INFRA] S204: $uptime, disk $disk, {$docker} Docker. Ollama: $ollama\n";
    $tool_context .= "APIs: /api/cx, /api/sentinel, /api/wevia-capabilities.php, /api/wedroid-brain-api.php, /api/ethica-stats-api.php, /api/skills-api.php\n";
    $tool_context .= "PG S95: adx_system(configs,crons), adx_clients(6.5M contacts), ethica(50K+ HCPs)\n";
    $tool_context .= "Docker: kuma:3088, plausible:8787, authentik:9100, n8n:5678, searxng:8080, qdrant:6333, loki:3100, mattermost:8065, vaultwarden:8222\n[/INFRA]\n";
}
// ═══ SYSTEM PROMPTS — SHORT for local, FULL for cloud ═══
$is_local = in_array($provider, ["ollama-s204","openclaw","ollama-s151","wedroid","wevcode","wevialife","bladerazor","meditron","medllama","granite","glm4","kilo","hermes"], true);

// Short prompts for local models (faster inference).
// BUG FIX: "default" previously contained the literal text '.WEVAL_BRAND_CONTEXT.'
// — a single-quoted concatenation pasted inside a double-quoted string — so the
// raw placeholder was sent to the model instead of the brand context. The
// constant (assumed defined by weval-brand-guard.php — TODO confirm) is now
// actually concatenated.
$sys_local = [
    "default" => WEVAL_BRAND_CONTEXT . "Tu es WEVIA, assistant IA de WEVAL Consulting (SAP, Analytics, AI, Marketing B2B, Ethica Pharma 50K+ HCPs). Concis et actionnable.",
    "wedroid" => "Tu es WEDROID, agent DevOps WEVAL. S204=PRIMARY, S95=WEVADS, S151=OpenClaw. PHP/Node/Python/PG/nginx/Docker/PMTA.",
    "wevcode" => "Tu es WEVCODE, assistant dev WEVAL. PHP 8.4, React, Node, PostgreSQL, Python. Enrichir existant, jamais _v2. GOLD avant modifier.",
    "wevialife" => "Tu es WEVIA Life, assistant productivite WEVAL. Aide Yacine (Partner) et Ambre (tech lead).",
    "bladerazor" => "Tu es BladeRazor, pentester WEVAL. Cibles: weval-consulting.com (S204), S95, S151. Nuclei + OWASP. Severity: Critical/High/Medium/Low.",
    "medical" => "Medical AI for WEVAL Ethica. 50K+ HCPs (France, Morocco, Algeria, Tunisia, Belgium). Evidence-based. Cite sources. DB: ethica.medecins_real.",
    "kilo" => "Tu es Kilo, orchestrateur multi-modeles WEVAL. 500+ modeles Ollama. Decompose les taches complexes.",
    "hermes" => "Tu es Hermes, agent autonome 27 skills WEVAL. File mgmt, web collecte, code exec, DB queries, monitoring.",
];
// Full prompts for cloud models (fast inference, can handle more context).
// Each prompt is assembled from its lines and joined with "\n".
$sys_cloud = [
    "default" => implode("\n", [
        "Tu es WEVIA, l'assistant IA souverain de WEVAL Consulting, cabinet B2B international.",
        "Services: SAP Consulting (Ecosystem Partner), Data Analytics, AI Solutions, Digital Marketing, Ethica B2B Pharma (50K+ HCPs).",
        "Reponds dans la langue de la question. Sois concis, precis et actionnable.",
        "RDV: booking.com/ymahboub/30min. Yacine Mahboub, Partner.",
        "JAMAIS mentionner Ollama/Groq/Cerebras/architecture interne. Tout = WEVIA Engine.",
        "Tu as acces a la Knowledge Base WEVAL, au repo Git, et a l'infrastructure 3 serveurs.",
    ]),
    "wedroid" => implode("\n", [
        "Tu es WEDROID, l'agent DevOps autonome de WEVAL Consulting.",
        "Tu peux executer des commandes sur S204 (PRIMARY), S95 (WEVADS), S151 (OpenClaw).",
        "Stack: PHP 8.4, Node.js, Python 3, PostgreSQL, nginx, Apache, Docker (15 containers), PMTA.",
        "Architecture: S204=site+WEVIA+PMTA+Docker, S95=WEVADS+Arsenal+PG+Ethica, S151=OpenClaw+Ollama.",
        "Reseau prive 10.1.0.2<->10.1.0.3. GOLD backup obligatoire avant modification.",
    ]),
    "wevcode" => implode("\n", [
        "Tu es WEVCODE, l'assistant de developpement de WEVAL Consulting.",
        "Tu aides a coder en PHP, Python, JavaScript, SQL, Bash pour les 3 produits WEVAL.",
        "WEVADS: email marketing, Brain Engine 646 configs, 9 SACRED winners, PMTA, PostgreSQL.",
        "WEVIA: chatbot 32 modules cognitive-brain.php, SmartRoute V3, 15 providers.",
        "Site: React SPA, weval-audit-reco.js, auth-session.php (immutable), Cloudflare.",
        "Regles: JAMAIS _v2/_new, enrichir existant. GOLD avant modifier. Zero regression.",
    ]),
    "medical" => implode("\n", [
        "You are a medical AI assistant for WEVAL's Ethica B2B Pharma division.",
        "You help healthcare professionals with evidence-based clinical information.",
        "50K+ HCPs in database. Countries: France, Morocco, Algeria, Tunisia, Belgium.",
        "DB: ethica.medecins_real (columns: pays, specialite, email, nom, prenom).",
        "ALWAYS cite sources. NEVER replace professional medical judgment.",
        "Consent via consent.wevup.app. NEVER use culturellemejean.charity for Ethica.",
    ]),
];
// Select the system-prompt flavour for the requested provider.
// match(true) replaces the if/elseif chain; in_array is strict.
$prompt_key = match (true) {
    in_array($provider, ["wedroid", "kilo", "hermes", "deerflow"], true) => "wedroid",
    $provider === "wevcode" => "wevcode",
    $provider === "wevialife" => "wevialife",
    $provider === "bladerazor" => "bladerazor",
    in_array($provider, ["meditron", "medllama"], true) => "medical",
    default => "default",
};

// Local models get the short prompt table; cloud models the full one.
if ($is_local) {
    $system = ($sys_local[$prompt_key] ?? $sys_local["default"]);
} else {
    $system = ($sys_cloud[$prompt_key] ?? $sys_cloud["default"]);
}
// Enrich medical providers with live per-country Ethica HCP counts.
// The query runs remotely: sentinel-brain.php on 10.1.0.3 executes the
// urlencoded shell command and returns its stdout as JSON under "output".
// NOTE(review): the PG password travels inside the URL query string — it will
// show up in the sentinel host's access logs; consider moving it out of the URL.
if (in_array($provider, ["meditron","medllama"])) {
    $ethica_raw = shell_exec("curl -s 'http://10.1.0.3:5890/api/sentinel-brain.php?action=exec&cmd=" .
        urlencode("PGPASSWORD=" . weval_secret('WEVAL_PG_ADMIN_PASS') . " psql -h 127.0.0.1 -U admin -d adx_system -t -A -c \"SELECT pays||': '||count(*) FROM ethica.medecins_real GROUP BY pays ORDER BY count DESC LIMIT 8\" 2>/dev/null") . "'");
    $ej = json_decode($ethica_raw, true);
    $et = trim($ej["output"] ?? "");
    // Only append when the query produced something that looks like real rows.
    if ($et && strlen($et) > 5) $system .= "\n[Ethica HCPs]\n$et\n";
}

// Attach whichever optional context sections were built above (may be empty).
$system .= $kb_context . $git_context . $tool_context;
// ═══ PROVIDER ROUTING ═══
$response_text = "";

// Model mapping for agent providers: which Ollama model each agent runs on.
$model_map = [
    "wedroid" => "mistral", "wevcode" => "mistral", "wevialife" => "qwen3:4b",
    "meditron" => "meditron:7b", "medllama" => "medllama2",
    "granite" => "granite4", "glm4" => "glm4:9b",
    "bladerazor" => "mistral", "kilo" => "mistral", "hermes" => "mistral",
];

// Cloud providers config: [endpoint, API key, default model].
// SECURITY FIX: the cerebras/sambanova/alibaba/zhipu keys were committed here
// in clear text. They now come from /etc/weval/secrets.env like GROQ_KEY and
// OPENROUTER_KEY. The previously hard-coded keys are compromised (they were in
// source control) and MUST be rotated, then provisioned in the secrets file.
$cloud_keys = [
    "groq" => ["https://api.groq.com/openai/v1/chat/completions", ($secrets["GROQ_KEY"] ?? ""), "llama-3.3-70b-versatile"],
    "cerebras" => ["https://api.cerebras.ai/v1/chat/completions", ($secrets["CEREBRAS_KEY"] ?? ""), "qwen-3-235b-a22b-instruct-2507"],
    "sambanova" => ["https://api.sambanova.ai/v1/chat/completions", ($secrets["SAMBANOVA_KEY"] ?? ""), "Meta-Llama-3.3-70B-Instruct"],
    "openrouter" => ["https://openrouter.ai/api/v1/chat/completions", ($secrets["OPENROUTER_KEY"] ?? ""), "meta-llama/llama-3.3-70b-instruct:free"],
    "alibaba" => ["https://dashscope-intl.aliyuncs.com/compatible-mode/v1/chat/completions", ($secrets["ALIBABA_KEY"] ?? ""), "qwen-plus"],
    "zhipu" => ["https://open.bigmodel.cn/api/paas/v4/chat/completions", ($secrets["ZHIPU_KEY"] ?? ""), "glm-4-plus"],
];
// Route to the correct provider.
// FIX: removed the dead `elseif ($provider === "sambanova")` branch — it was
// unreachable (sambanova is matched by isset($cloud_keys[...]) first) and
// carried another hard-coded API key. Also merged the two identical branches
// that both routed to local mistral.
if (isset($cloud_keys[$provider])) {
    // Cloud OpenAI-compatible endpoint.
    [$url, $key, $mdl] = $cloud_keys[$provider];
    $response_text = cloud_call($url, $key, $mdl, $system, $msg);
} elseif (in_array($provider, ["openclaw", "ollama-s151", "kilo", "hermes"], true)) {
    // Agents that all resolve to local mistral on S204.
    $response_text = ollama_s204("mistral", $system, $msg);
} elseif ($provider === "deerflow") {
    // DeerFlow SuperAgent — call the local Gateway API.
    $df_ch = curl_init("http://localhost:8001/api/chat");
    curl_setopt_array($df_ch, [
        CURLOPT_POST => true,
        CURLOPT_RETURNTRANSFER => true,
        CURLOPT_TIMEOUT => 60,
        CURLOPT_HTTPHEADER => ["Content-Type: application/json"],
        CURLOPT_POSTFIELDS => json_encode(["message" => $msg, "thread_id" => "ops-" . time()]),
    ]);
    $df_r = curl_exec($df_ch);
    $df_code = curl_getinfo($df_ch, CURLINFO_HTTP_CODE);
    curl_close($df_ch);
    if ($df_code == 200 && $df_r) {
        $df_d = json_decode($df_r, true);
        // The gateway's response field name varies; try the known candidates.
        $response_text = $df_d["response"] ?? $df_d["output"] ?? $df_d["content"] ?? $df_r;
    }
    if (!$response_text || strlen($response_text) < 10) {
        // Fallback to Groq if DeerFlow fails or answers too briefly.
        $response_text = cloud_call("https://api.groq.com/openai/v1/chat/completions", ($secrets["GROQ_KEY"] ?? ""), "llama-3.3-70b-versatile", $system, $msg);
    }
} else {
    // Everything else runs on local Ollama with the agent's mapped model.
    $actual_model = $model_map[$provider] ?? $model;
    $response_text = ollama_s204($actual_model, $system, $msg);
}
// Final JSON payload for the front-end: the reply plus which context
// sections (KB / git / tools) were actually attached.
$out = [
    "response" => $response_text ?: "No response",
    "provider" => $provider,
    "model"    => $model_map[$provider] ?? $model,
    "kb"       => $useKB && !empty($kb_context),
    "git"      => $useGit && !empty($git_context),
    "tools"    => $useTool && !empty($tool_context),
];
echo json_encode($out);
// ═══ FUNCTIONS ═══

/**
 * Chat against the local Ollama daemon (localhost:11434).
 *
 * @param string $model  Ollama model tag (e.g. "mistral", "qwen3:4b").
 * @param string $system System prompt.
 * @param string $msg    User message.
 * @return string Assistant reply with any <think>…</think> blocks stripped;
 *                on failure, the ANSI-cleaned raw curl output.
 */
function ollama_s204($model, $system, $msg) {
    // For qwen3 models, prefix /no_think to disable thinking mode.
    // str_contains replaces the strpos(...) !== false idiom.
    $user_msg = str_contains($model, 'qwen3') ? "/no_think " . $msg : $msg;
    $payload = json_encode([
        "model" => $model,
        "messages" => [
            ["role" => "system", "content" => $system],
            ["role" => "user", "content" => $user_msg],
        ],
        "stream" => false,
        "options" => ["num_predict" => 768], // cap generation length
    ]);
    // 120 s budget: local inference on larger models can be slow.
    $raw = shell_exec("curl -s -m 120 http://localhost:11434/api/chat -d " . escapeshellarg($payload) . " 2>&1");
    $j = json_decode($raw, true);
    // On decode failure fall back to the raw curl output, stripped of ANSI noise.
    $content = $j["message"]["content"] ?? clean_ansi(trim($raw ?: "No response"));
    // Strip any residual <think> blocks emitted by reasoning models.
    return trim(preg_replace('/<think>.*?<\/think>/s', '', $content));
}
/**
 * Call an OpenAI-compatible cloud chat-completions endpoint via curl.
 *
 * @param string $url    Chat-completions endpoint URL.
 * @param string $key    Bearer API key.
 * @param string $model  Model identifier.
 * @param string $system System prompt.
 * @param string $msg    User message.
 * @return string Assistant reply, or the ANSI-cleaned raw output on failure.
 */
function cloud_call($url, $key, $model, $system, $msg) {
    $payload = json_encode([
        "model" => $model,
        "messages" => [
            ["role" => "system", "content" => $system],
            ["role" => "user", "content" => $msg],
        ],
        "max_tokens" => 2048,
        "stream" => false,
    ]);
    // SECURITY FIX: $url and the Authorization header were previously
    // interpolated into the shell line unescaped; every dynamic argument is
    // now passed through escapeshellarg().
    $cmd = "curl -s -m 90 -X POST " . escapeshellarg($url)
         . " -H " . escapeshellarg("Authorization: Bearer $key")
         . " -H 'Content-Type: application/json' -d " . escapeshellarg($payload) . " 2>&1";
    $raw = shell_exec($cmd);
    $j = json_decode($raw, true);
    return $j["choices"][0]["message"]["content"] ?? clean_ansi(trim($raw));
}
/**
 * Strip ANSI CSI escape sequences (e.g. "\x1b[31m") from a string.
 *
 * @param string $s Possibly colour-coded terminal output.
 * @return string The same text without escape sequences.
 */
function clean_ansi($s) {
    $csiPattern = '/\x1b\[[0-9;?]*[a-zA-Z]/';
    $stripped = preg_replace($csiPattern, '', $s);
    return $stripped;
}