<?php

/**
 * ╔══════════════════════════════════════════════════════════╗
 * ║          WEVIA OPUS ORCHESTRATOR v1.0                    ║
 * ║  Central Brain — Ties CoT + RAG + Tools + Multi-Model    ║
 * ║  THE main entry point for all WEVIA intelligence         ║
 * ╚══════════════════════════════════════════════════════════╝
 */

require_once __DIR__ . '/chain-of-thought.php';
require_once __DIR__ . '/rag-engine.php';
require_once __DIR__ . '/tool-use-engine.php';

class OpusOrchestrator {
    /** @var ChainOfThought Request classifier / reasoning engine. */
    private $cot;

    /** @var RAGEngine|null Knowledge-base retriever; null when the DB is unreachable. */
    private $rag;

    /** @var ToolUseEngine Parses and executes <tool_call> blocks in model output. */
    private $tools;

    /** @var array Runtime configuration (user overrides merged over defaults). */
    private $config;

    /** @var array Per-request performance records appended by process(). */
    private $metrics = [];

    /**
     * Provider configurations loaded from DB.
     * NOTE(review): never written in this file — presumably populated by code
     * outside this chunk; confirm before removing.
     *
     * @var array
     */
    private $providers = [];

    /**
     * Wires up the CoT and tool engines, then attempts to connect to Postgres
     * for RAG. RAG is treated as optional: any connection failure degrades
     * gracefully to $this->rag = null instead of aborting construction.
     *
     * @param array $config Partial overrides merged over the defaults below.
     */
    public function __construct(array $config = []) {
        $this->config = array_merge([
            'ollama_url'       => 'http://127.0.0.1:11434',
            'db_host'          => '127.0.0.1',
            'db_name'          => 'wevia_db',
            'db_user'          => 'postgres',
            'db_pass'          => '',
            'default_model'    => 'deepseek-r1:32b',
            'fast_model'       => 'llama3.1:8b',
            'embed_model'      => 'nomic-embed-text',
            'thinking_enabled' => true,
            'rag_enabled'      => true,
            'tools_enabled'    => true,
            'max_tokens'       => 4096,
            'temperature'      => 0.7
        ], $config);

        $this->cot = new ChainOfThought();
        $this->tools = new ToolUseEngine();

        try {
            $pdo = new PDO(
                "pgsql:host={$this->config['db_host']};dbname={$this->config['db_name']}",
                $this->config['db_user'], $this->config['db_pass']
            );
            $this->rag = new RAGEngine($pdo, $this->config['ollama_url']);
        } catch (\Exception $e) {
            // Deliberate best-effort: a missing/unreachable DB disables RAG
            // only; everything else keeps working.
            $this->rag = null;
        }
    }

    /**
     * Main entry point — processes a user message with full Opus intelligence.
     *
     * Pipeline: classify complexity (CoT) → pick model → compose system prompt
     * (base + RAG context + tools) → call LLM → run any requested tools and
     * re-prompt with their results → split <think> reasoning from the answer.
     *
     * @param string $message User message.
     * @param array  $options Supported keys: session_id, capability
     *                        ('normal'|'think'|'code'|...), model (forces a
     *                        specific model), temperature, max_tokens.
     * @return array response, thinking, model, complexity, duration_ms,
     *               rag_context (bool), tools_used, confidence, steps.
     */
    public function process(string $message, array $options = []): array {
        $startTime = microtime(true);
        $sessionId = $options['session_id'] ?? 'default';
        $capability = $options['capability'] ?? 'normal';
        $model = $options['model'] ?? null;

        // Step 1: Classify the request.
        $analysis = $this->cot->reason($message);
        $complexity = $analysis['complexity'];

        // Step 2: Select the best model, unless the caller pinned one.
        if (!$model) {
            $model = $this->selectModel($complexity, $capability);
        }

        // Step 3: Build the system prompt.
        $systemPrompt = $this->buildSystemPrompt($capability, $complexity);

        // Step 4: Get RAG context if enabled (and the DB connected at boot).
        $ragContext = '';
        if ($this->config['rag_enabled'] && $this->rag) {
            $ragContext = $this->rag->getContext($message);
        }

        // Step 5: Advertise tools, except for trivial requests where the
        // extra prompt tokens aren't worth it.
        $toolsPrompt = '';
        if ($this->config['tools_enabled'] && $complexity !== 'low') {
            $toolsPrompt = $this->tools->getToolsPrompt();
        }

        // Step 6: Compose the full system prompt; array_filter drops the
        // empty sections so we don't emit stray separators.
        $fullSystem = implode("\n\n", array_filter([
            $systemPrompt,
            $ragContext ? "CONTEXTE DE LA KNOWLEDGE BASE:\n$ragContext" : '',
            $toolsPrompt
        ]));

        // Step 7: Call the LLM.
        $response = $this->callLLM($model, $fullSystem, $message, $options);

        // Step 8: If the model asked for tools, execute them and re-prompt
        // once with the results appended to the user message.
        $toolResults = null;
        if ($this->config['tools_enabled'] && str_contains($response, '<tool_call>')) {
            $toolResults = $this->tools->processResponse($response);

            if ($toolResults['had_tools']) {
                // BUGFIX: JSON_UNESCAPED_UNICODE so French tool output reaches
                // the model as readable text instead of \uXXXX escapes.
                $toolContext = "Résultats des outils:\n" . json_encode(
                    $toolResults['results'],
                    JSON_PRETTY_PRINT | JSON_UNESCAPED_UNICODE
                );
                $response = $this->callLLM($model, $fullSystem,
                    $message . "\n\n" . $toolContext, $options);
            }
        }

        // Step 9: Separate <think>…</think> reasoning from the visible answer.
        $thinking = '';
        $cleanResponse = $response;
        if (preg_match('/<think>(.*?)<\/think>/s', $response, $m)) {
            $thinking = trim($m[1]);
            $cleanResponse = trim(preg_replace('/<think>.*?<\/think>/s', '', $response));
        }

        $duration = round((microtime(true) - $startTime) * 1000);

        $this->metrics[] = [
            'model' => $model,
            'complexity' => $complexity,
            'duration_ms' => $duration,
            'rag_used' => !empty($ragContext),
            'tools_used' => $toolResults ? $toolResults['had_tools'] : false
        ];

        return [
            'response' => $cleanResponse,
            'thinking' => $thinking,
            'model' => $model,
            'complexity' => $complexity,
            'duration_ms' => $duration,
            'rag_context' => !empty($ragContext),
            'tools_used' => $toolResults,
            'confidence' => $analysis['confidence'] ?? 0.8,
            'steps' => count($analysis['steps'] ?? [])
        ];
    }

    /**
     * Select the best model based on complexity and capability.
     *
     * Falls back per-complexity to the 'normal' model, then to the global
     * default when the complexity itself is unknown.
     *
     * @param string $complexity 'low' | 'medium' | 'high' (others fall through).
     * @param string $capability 'think' | 'code' | 'normal' (others fall through).
     * @return string Ollama model tag.
     */
    private function selectModel(string $complexity, string $capability): string {
        $modelMap = [
            'high' => [
                'think' => 'deepseek-r1:32b',
                'code' => 'qwen2.5-coder:32b',
                'normal' => 'llama3.3:70b'
            ],
            'medium' => [
                'think' => 'deepseek-r1:14b',
                'code' => 'qwen2.5-coder:14b',
                'normal' => 'llama3.1:8b'
            ],
            'low' => [
                'think' => 'llama3.1:8b',
                'code' => 'qwen2.5-coder:14b',
                'normal' => 'llama3.1:8b'
            ]
        ];

        return $modelMap[$complexity][$capability]
            ?? $modelMap[$complexity]['normal']
            ?? $this->config['default_model'];
    }

    /**
     * Build the system prompt for a request from the JSON prompt library.
     *
     * Assembles: core prompt + WEVAL expertise + capability-specific prompt +
     * hallucination guard + (for high complexity) the <think> format hint.
     *
     * @param string $capability Capability key (code/email/sap/data/security/…).
     * @param string $complexity CoT-assessed complexity.
     * @return string Combined prompt; empty string when the library is missing.
     */
    private function buildSystemPrompt(string $capability, string $complexity): string {
        // BUGFIX: file_get_contents() returns false when the prompts file is
        // missing/unreadable; the old code passed that false straight to
        // json_decode(), which warns and is a TypeError under strict_types.
        // Guard explicitly and fall back to an empty prompt set.
        $rawJson = @file_get_contents(__DIR__ . '/../prompts/opus-system-prompts.json');
        $prompts = ($rawJson !== false ? json_decode($rawJson, true) : null) ?? [];

        $base = $prompts['opus_core'] ?? '';
        $weval = $prompts['weval_expert'] ?? '';
        $guard = $prompts['hallucination_guard'] ?? '';

        $specific = match($capability) {
            'code' => $prompts['code_reviewer'] ?? '',
            'email' => $prompts['email_marketing'] ?? '',
            'sap' => $prompts['sap_consultant'] ?? '',
            'data' => $prompts['data_analyst'] ?? '',
            'security' => $prompts['cybersecurity'] ?? '',
            default => ''
        };

        $thinking = '';
        if ($complexity === 'high' && ($prompts['thinking_format'] ?? '')) {
            $thinking = "\nFormat de réponse: Utilise <think>...</think> pour ton raisonnement, puis donne ta réponse finale.";
        }

        return implode("\n\n", array_filter([$base, $weval, $specific, $guard, $thinking]));
    }

    /**
     * Call the Ollama /api/chat endpoint (non-streaming).
     *
     * Never throws: transport and HTTP failures are reported as an error
     * string in the response position (callers treat the return as the
     * model's answer either way).
     *
     * @param string $model   Ollama model tag.
     * @param string $system  System prompt.
     * @param string $message User message.
     * @param array  $options Optional temperature / max_tokens overrides.
     * @return string Model answer, or an "Erreur modèle …" message.
     */
    private function callLLM(string $model, string $system, string $message, array $options = []): string {
        $ch = curl_init("{$this->config['ollama_url']}/api/chat");
        curl_setopt_array($ch, [
            CURLOPT_POST => true,
            CURLOPT_POSTFIELDS => json_encode([
                'model' => $model,
                'messages' => [
                    ['role' => 'system', 'content' => $system],
                    ['role' => 'user', 'content' => $message]
                ],
                'stream' => false,
                'options' => [
                    'temperature' => $options['temperature'] ?? $this->config['temperature'],
                    'num_predict' => $options['max_tokens'] ?? $this->config['max_tokens']
                ]
            ]),
            CURLOPT_RETURNTRANSFER => true,
            CURLOPT_TIMEOUT => 120,
            CURLOPT_HTTPHEADER => ['Content-Type: application/json']
        ]);

        $raw = curl_exec($ch);
        $httpCode = curl_getinfo($ch, CURLINFO_HTTP_CODE);
        $curlError = curl_error($ch);
        curl_close($ch);

        // BUGFIX: curl_exec() returns false on transport failure (timeout,
        // connection refused, DNS). The old code fell through to a misleading
        // "HTTP 0" and discarded the actual cURL error message.
        if ($raw === false) {
            return "Erreur modèle $model: " . ($curlError !== '' ? $curlError : 'échec cURL');
        }

        if ($httpCode !== 200) return "Erreur modèle $model: HTTP $httpCode";

        $data = json_decode($raw, true);
        return $data['message']['content'] ?? 'Pas de réponse';
    }

    /**
     * Get the per-request performance metrics accumulated by process().
     *
     * @return array List of metric records (model, complexity, duration_ms,
     *               rag_used, tools_used).
     */
    public function getMetrics(): array {
        return $this->metrics;
    }
}
|