<?php

declare(strict_types=1);

// JSON API entry point: reads a {"task": ..., "mode": ...} request body and
// dispatches the task through one of several multi-step LLM pipelines
// (see the mode handlers further down in this file).
header("Content-Type: application/json; charset=utf-8");

// Decode the raw JSON request body; fall back to an empty array when the
// body is missing or is not valid JSON.
$input = json_decode(file_get_contents("php://input"), true) ?: [];

$task = $input["task"] ?? "";
$mode = $input["mode"] ?? "deep"; // default pipeline

// A task is mandatory; reject early with a 400 and a JSON error payload.
if (!$task) {
    http_response_code(400);
    echo json_encode(["error"=>"task required"]);
    exit;
}

// Load KEY=VALUE pairs from the secrets file into $secrets.
// Named flags replace the opaque "2|4" (FILE_IGNORE_NEW_LINES = 2,
// FILE_SKIP_EMPTY_LINES = 4). Guarding with is_readable() avoids a PHP 8
// TypeError (foreach over false) when the file is missing or unreadable.
$secrets = [];
$secretsFile = "/etc/weval/secrets.env";
if (is_readable($secretsFile)) {
    foreach (file($secretsFile, FILE_IGNORE_NEW_LINES | FILE_SKIP_EMPTY_LINES) ?: [] as $line) {
        if (str_contains($line, "=")) {
            [$k, $v] = explode("=", $line, 2);
            // Strip surrounding whitespace and optional quoting from the value.
            $secrets[trim($k)] = trim($v, " \t\"'");
        }
    }
}
/**
 * Call a DeepSeek-R1-class model, trying each configured provider in order.
 *
 * Providers whose API key is absent from $secrets are skipped; the first
 * provider that returns a non-empty completion wins.
 *
 * @param array $msgs    Chat messages in OpenAI format ([["role"=>..., "content"=>...], ...]).
 * @param array $secrets Key/value map loaded from the secrets file.
 * @param int   $max     max_tokens to request.
 * @return string The completion text, or "" if every provider failed.
 */
function callR1($msgs, $secrets, $max=800) {
    $providers = [
        [
            "k" => $secrets["SAMBANOVA_KEY"] ?? "",
            "u" => "https://api.sambanova.ai/v1/chat/completions",
            "m" => "DeepSeek-R1",
        ],
        [
            "k" => $secrets["GROQ_KEY"] ?? $secrets["GROQ_API_KEY"] ?? "",
            "u" => "https://api.groq.com/openai/v1/chat/completions",
            "m" => "deepseek-r1-distill-llama-70b",
        ],
    ];

    foreach ($providers as $p) {
        if (empty($p["k"])) {
            continue; // no key configured for this provider
        }

        $ch = curl_init($p["u"]);
        curl_setopt_array($ch, [
            CURLOPT_POST => true,
            CURLOPT_POSTFIELDS => json_encode([
                "model" => $p["m"],
                "messages" => $msgs,
                "max_tokens" => $max,
            ]),
            CURLOPT_HTTPHEADER => [
                "Content-Type: application/json",
                "Authorization: Bearer " . $p["k"],
            ],
            CURLOPT_RETURNTRANSFER => true,
            CURLOPT_TIMEOUT => 25, // R1 models reason at length; allow a generous timeout
        ]);
        $response = curl_exec($ch);
        curl_close($ch);

        // curl_exec() returns false on transport errors; fall through to the
        // next provider instead of feeding false into json_decode().
        if (!is_string($response)) {
            continue;
        }

        $content = json_decode($response, true)["choices"][0]["message"]["content"] ?? "";
        if ($content) {
            return $content;
        }
    }

    return ""; // all providers unavailable or returned empty
}
/**
 * Single chat-completion call (defaults to Cerebras Qwen-3).
 *
 * @param array  $msgs  Chat messages in OpenAI format.
 * @param string $key   Bearer token for the target API.
 * @param string $model Model identifier to request.
 * @param string $url   Chat-completions endpoint URL.
 * @param int    $max   max_tokens to request.
 * @return string Completion text, or "" on transport/API failure.
 */
function callLLM($msgs, $key, $model="qwen-3-235b-a22b-instruct-2507", $url="https://api.cerebras.ai/v1/chat/completions", $max=1000) {
    $ch = curl_init($url);
    curl_setopt_array($ch, [
        CURLOPT_POST => true,
        CURLOPT_POSTFIELDS => json_encode([
            "model" => $model,
            "messages" => $msgs,
            "max_tokens" => $max,
            "temperature" => 0.3, // low temperature: favour stable, focused output
        ]),
        CURLOPT_HTTPHEADER => [
            "Content-Type: application/json",
            "Authorization: Bearer $key",
        ],
        CURLOPT_RETURNTRANSFER => true,
        CURLOPT_TIMEOUT => 15,
    ]);
    $response = curl_exec($ch);
    curl_close($ch);

    // curl_exec() returns false on transport errors; bail out instead of
    // feeding false into json_decode().
    if (!is_string($response)) {
        return "";
    }

    return json_decode($response, true)["choices"][0]["message"]["content"] ?? "";
}
// API keys for the two upstream providers. Verification/review steps
// deliberately run on Groq (a different provider/model family) so the
// checker is not the same model that produced the answer.
$key = $secrets["CEREBRAS_API_KEY"] ?? "";

$groq = $secrets["GROQ_KEY"] ?? $secrets["GROQ_API_KEY"] ?? "";

if ($mode === "deep") {
    // OPUS PATTERN: Decompose > Reason > Verify > Synthesize
    $step1 = callLLM([["role"=>"system","content"=>"Decompose this task into 3-5 sub-problems. Be precise."],["role"=>"user","content"=>$task]], $key);

    $step2 = callLLM([["role"=>"system","content"=>"You are an expert. Solve each sub-problem thoroughly."],["role"=>"user","content"=>"Sub-problems:\n$step1\n\nSolve each one with reasoning."]], $key);

    // Cross-provider verification pass.
    $step3 = callLLM([["role"=>"system","content"=>"Verify this solution. Find errors or gaps. Score 1-10."],["role"=>"user","content"=>"Task: $task\nSolution:\n$step2"]], $groq, "llama-3.3-70b-versatile", "https://api.groq.com/openai/v1/chat/completions");

    $step4 = callLLM([["role"=>"system","content"=>"Synthesize the final answer in French. Include the verification feedback."],["role"=>"user","content"=>"Task: $task\nSolution: $step2\nVerification: $step3"]], $key);

    // Intermediate steps are truncated in the response; only the synthesis is full-length.
    echo json_encode(["mode"=>"deep","task"=>$task,"decomposition"=>substr($step1,0,300),"solution"=>substr($step2,0,500),"verification"=>substr($step3,0,300),"synthesis"=>$step4], JSON_UNESCAPED_UNICODE);

} elseif ($mode === "code") {
    // CODE GEN: Spec > Generate > Review > Fix
    $spec = callLLM([["role"=>"system","content"=>"Write a detailed technical spec for this code request."],["role"=>"user","content"=>$task]], $key);

    $code = callLLM([["role"=>"system","content"=>"Generate complete, production-ready code based on this spec. Include error handling."],["role"=>"user","content"=>$spec]], $key);

    // Cross-provider code review pass.
    $review = callLLM([["role"=>"system","content"=>"Code review: find bugs, security issues, performance problems. Be critical."],["role"=>"user","content"=>"Spec: $spec\nCode:\n$code"]], $groq, "llama-3.3-70b-versatile", "https://api.groq.com/openai/v1/chat/completions");

    echo json_encode(["mode"=>"code","spec"=>substr($spec,0,300),"code"=>$code,"review"=>$review], JSON_UNESCAPED_UNICODE);

} elseif ($mode === "architect") {
    // ARCHITECTURE: Requirements > Design > Tradeoffs > Decision
    $reqs = callLLM([["role"=>"system","content"=>"Extract requirements and constraints from this request."],["role"=>"user","content"=>$task]], $key);

    $design = callLLM([["role"=>"system","content"=>"Design 2-3 architecture options. Include diagrams in text."],["role"=>"user","content"=>"Requirements:\n$reqs"]], $key);

    // Cross-provider tradeoff analysis.
    $tradeoffs = callLLM([["role"=>"system","content"=>"Analyze tradeoffs between options. Consider: scalability, cost, complexity, maintenance."],["role"=>"user","content"=>"Options:\n$design"]], $groq, "llama-3.3-70b-versatile", "https://api.groq.com/openai/v1/chat/completions");

    echo json_encode(["mode"=>"architect","requirements"=>substr($reqs,0,300),"designs"=>substr($design,0,500),"tradeoffs"=>$tradeoffs], JSON_UNESCAPED_UNICODE);

} elseif ($mode === "multiagent") {
    // MULTI-AGENT: 3 experts debate (each expert runs on a different model).
    $expert1 = callLLM([["role"=>"system","content"=>"You are a senior backend engineer. Give your expert opinion."],["role"=>"user","content"=>$task]], $key);

    $expert2 = callLLM([["role"=>"system","content"=>"You are a security architect. Give your expert opinion."],["role"=>"user","content"=>$task]], $groq, "llama-3.3-70b-versatile", "https://api.groq.com/openai/v1/chat/completions");

    $expert3 = callLLM([["role"=>"system","content"=>"You are a DevOps/SRE expert. Give your expert opinion."],["role"=>"user","content"=>$task]], $groq, "moonshotai/kimi-k2-instruct", "https://api.groq.com/openai/v1/chat/completions");

    $synthesis = callLLM([["role"=>"system","content"=>"Synthesize these 3 expert opinions into a unified recommendation. Note agreements and disagreements. French."],["role"=>"user","content"=>"Backend: $expert1\n\nSecurity: $expert2\n\nDevOps: $expert3"]], $key);

    echo json_encode(["mode"=>"multiagent","experts"=>3,"synthesis"=>$synthesis], JSON_UNESCAPED_UNICODE);

} else {
    // Previously an unknown mode fell off the elseif chain and the endpoint
    // returned an empty 200 response; report the error explicitly instead.
    http_response_code(400);
    echo json_encode(["error"=>"unknown mode: $mode","modes"=>["deep","code","architect","multiagent"]], JSON_UNESCAPED_UNICODE);
}