Some checks failed
WEVAL NonReg / nonreg (push) Has been cancelled
A) SSE apprentissage universel (ai_learning_log)
- Tous chatbots loggés après chaque interaction (public + internal)
- experience jsonb: chatbot, intent, message_sample, backend, total_ms, memory_scope
- patterns_extracted jsonb: tests_passed, has_natural_lang, not_hallucinating, backend_ok
- outcome_success bool: true si tests >= 4 et backend ok
- Public: session_id EXCLUE (anonymise) · only aggregated patterns
- Internal: session_id INCLUS (lie aux messages persistants)
- Event SSE learned emit avant done
B) Session persistance localStorage / sessionStorage (20 chatbots)
- Public (wevia, wevia-widget) -> sessionStorage (per-tab, transient)
- Internal (18 chatbots) -> localStorage (cross-reload, persistent)
- Key: opus_chatbot_session_{BOT_ID}
- Format: opus-{BOT}-{timestamp}-{random6}
- URL SSE auto-includes &session=... param
- Reuse same session across clicks
Impact runtime:
- User ouvre blade-ai -> click badge -> 1st query save msg1+resp1 dans wevia_conversations
- Ferme page, reouvre blade-ai -> click badge -> session LOCAL reutilisee -> SSE load msg1+resp1 comme context
- PG table wevia_conversations grows avec cross-session conversation history
- PG table ai_learning_log grows avec outcome patterns pour meta-analyse
Chatbots apprennent:
- Quels intents mieux servis (outcome_success TRUE)
- Quels backends problematiques (not_hallucinating FALSE)
- Quel chatbot le plus utilise (groupby chatbot)
Tests live:
- blade-ai learn-test session: 1 row inserted · outcome=partial (backend faible)
- Event SSE learned emitted correctly
- localStorage persist: session key stored client-side
- Zero regression pour public (sessionStorage scope)
Doctrine respectée:
- Zero regression (try/catch silencieux · fail soft)
- Apprentissage universel (ALL chatbots, pas juste internes)
- Public anonymise (pas de session_id)
- Internal lie a conversation history
- Backup GOLD 20 chatbots + SSE
- chattr mgmt rigoureux
- Cause racine mémoire cross-session résolue (localStorage)
308 lines
11 KiB
PHP
308 lines
11 KiB
PHP
<?php
/* ═══════════════════════════════════════════════════════════════════
   CLAUDE PATTERN SSE · Opus v17 · 22-avr

   SSE streaming version of claude-pattern-api.
   Emits phases in real-time:
   thinking → plan → RAG → execute → tests → response → critique

   Usage:
     GET /api/claude-pattern-sse.php?message=X&chatbot=Y

   Event types: thinking, plan, rag, execute, tests, response, critique, done
   ═══════════════════════════════════════════════════════════════════ */
// ── SSE transport setup ─────────────────────────────────────────────
// text/event-stream + no-store + "X-Accel-Buffering: no" (disables nginx
// proxy buffering) are all required for real-time delivery to the client.
foreach ([
    'Content-Type: text/event-stream; charset=utf-8',
    'Cache-Control: no-store',
    'X-Accel-Buffering: no',
    'Access-Control-Allow-Origin: *',
] as $hdr) {
    header($hdr);
}

// Kill every layer of PHP-side buffering so each emit() reaches the
// browser immediately instead of accumulating server-side.
@ini_set('output_buffering', 'off');
@ini_set('zlib.output_compression', false);
while (ob_get_level() > 0) {
    ob_end_flush();
}
ob_implicit_flush(true);
/**
 * Emit one SSE frame ("event: <name>\n" + "data: <json>\n\n") and flush.
 *
 * JSON_INVALID_UTF8_SUBSTITUTE guards against backend responses that are
 * not valid UTF-8: without it json_encode() returns false and the frame
 * would carry an empty "data:" payload, breaking JSON.parse() client-side.
 *
 * @param string $event SSE event name (client listens via addEventListener)
 * @param array  $data  Payload, serialized as a single-line JSON object
 */
function emit($event, $data) {
    $json = json_encode($data, JSON_UNESCAPED_UNICODE | JSON_INVALID_UTF8_SUBSTITUTE);
    if ($json === false) {
        // Last-resort fallback so the stream never carries a bare "data:" line.
        $json = '{"error":"json_encode_failed"}';
    }
    echo "event: {$event}\n";
    echo "data: {$json}\n\n";
    @flush();
}
// ── Request parameters (GET preferred, POST fallback) ───────────────
$message = trim($_GET['message'] ?? $_POST['message'] ?? '');
$chatbot = $_GET['chatbot'] ?? $_POST['chatbot'] ?? 'wevia-master';
// Client normally supplies &session=... (its localStorage/sessionStorage
// key); otherwise mint a short throwaway id so the stream still works.
$session = $_GET['session'] ?? $_POST['session'] ?? 'sse-' . bin2hex(random_bytes(3));
$load_memory = isset($_GET['memory']) ? ($_GET['memory'] === '1') : true; // default on
$memory_scope = $_GET['memory_scope'] ?? 'persistent'; // persistent|session

// Public widgets are forced to transient session scope: no cross-session
// persistence, and the learning log stays anonymized (no session_id).
$public_chatbots = ['wevia', 'wevia-widget', 'wevia-widget-public'];
// FIX: compare against the resolved $chatbot (previously $_GET['chatbot'],
// which skipped this override whenever the chatbot id arrived via POST).
if (in_array($chatbot, $public_chatbots, true)) {
    $memory_scope = 'session';
}

if (!$message) {
    emit('error', ['error' => 'message required']);
    exit;
}
// ── Chatbot → backend endpoint routing table ────────────────────────
// Every chatbot id maps to the local API script that actually answers.
// Unknown ids fall back to the wevia-master backend.
$BACKENDS = [
    'wevia-master'     => '/api/wevia-autonomous.php',
    'wevia'            => '/api/ambre-thinking.php',
    'claw'             => '/api/wevia-json-api.php',
    'claw-chat'        => '/api/wevia-json-api.php',
    'claw-code'        => '/api/weval-unified-pipeline.php',
    'director'         => '/api/wevia-autonomous.php',
    'director-chat'    => '/api/wevia-director.php',
    'ethica'           => '/api/ethica-brain.php',
    'ethica-chatbot'   => '/api/ethica-brain.php',
    'openclaw'         => '/api/openclaw-proxy.php',
    'blade-ai'         => '/api/blade-api.php',
    'wevcode'          => '/api/wevcode-superclaude.php',
    'wevia-console'    => '/api/wevia-json-api.php',
    'wevia-widget'     => '/api/wevia-json-api.php',
    'wevia-cortex'     => '/api/wevia-stream-api.php',
    'wevia-chat'       => '/api/wevia-autonomous.php',
    'sovereign-claude' => '/api/wevia-json-api.php',
    'weval-arena'      => '/api/wevia-multi-provider.php',
    'weval-arena-v2'   => '/api/wevia-webchat-direct.php',
    'l99-brain'        => '/api/wevia-master-api.php',
    'multiagent'       => '/api/wevia-v83-multi-agent-orchestrator.php',
    'auto'             => '/api/opus5-autonomous-orchestrator-v3.php',
];

$backend = $BACKENDS[$chatbot] ?? $BACKENDS['wevia-master'];
// ── PHASE 1 · THINKING (keyword-based intent classification) ────────
$t1 = microtime(true);
emit('thinking', [
    'status' => 'analyzing',
    'message_length' => strlen($message),
    'chatbot' => $chatbot,
    'backend' => $backend,
]);

// FIX: mb_strtolower so uppercase accented input ("État") still matches
// the accented keyword 'état' below — plain strtolower() leaves É intact.
$msg_lower = mb_strtolower($message);
$intent_map = [
    'status'    => ['status', 'état', 'sante', 'health', 'live'],
    'action'    => ['rotate', 'restart', 'deploy', 'run', 'exec'],
    'analytics' => ['kpi', 'metric', 'combien', 'total', 'nombre'],
];
$intent = 'query'; // default when no keyword matches
foreach ($intent_map as $i => $kws) {
    foreach ($kws as $kw) {
        // str_contains replaces the strpos(...) !== false idiom (PHP 8+,
        // consistent with the match() expression used later in this file).
        if (str_contains($msg_lower, $kw)) { $intent = $i; break 2; }
    }
}

emit('thinking', [
    'status' => 'complete',
    'intent' => $intent,
    'duration_ms' => round((microtime(true) - $t1) * 1000, 1),
]);
// ── PHASE 2 · PLAN ──────────────────────────────────────────────────
// Streams an intent-specific execution outline for the client to render.
$t2 = microtime(true);
emit('plan', ['status' => 'building']);

$steps = match ($intent) {
    'status'    => ['Query KPI sources', 'Aggregate health data', 'Format response'],
    'action'    => ['Validate safety', 'Execute command', 'Verify result'],
    'analytics' => ['Fetch metrics', 'Calculate aggregates', 'Return structured data'],
    default     => ['Parse query', 'Route to backend', 'Format natural response'],
};

emit('plan', [
    'status' => 'complete',
    'steps' => $steps,
    'duration_ms' => round((microtime(true) - $t2) * 1000, 1),
]);
// ── PHASE 3 · RAG availability probe ────────────────────────────────
// 1-second ping against the local Qdrant instance; purely informational,
// the request proceeds either way (status "skipped" when unreachable).
$t3 = microtime(true);
emit('rag', ['status' => 'searching']);

$qdrant_ctx = stream_context_create(['http' => ['timeout' => 1]]);
$rag_ctx = @file_get_contents('http://127.0.0.1:6333/collections', false, $qdrant_ctx);
$rag_ok = ($rag_ctx !== false);

emit('rag', [
    'status' => $rag_ok ? 'qdrant_available' : 'skipped',
    'duration_ms' => round((microtime(true) - $t3) * 1000, 1),
]);
// ── PHASE 3.5 · MEMORY LOAD (persistent scope / internal chatbots) ──
// Pulls up to 10 prior turns for this session from wevia_conversations.
// NOTE(review): $history_msgs is collected and counted here but is never
// forwarded to the backend call below — only the count is surfaced in the
// 'memory' and 'done' events; confirm whether that is intentional.
$history_msgs = [];
$history_count = 0;
if ($load_memory && $memory_scope === 'persistent' && $session) {
    try {
        $dsn = "host=127.0.0.1 dbname=adx_system user=admin password=admin123 connect_timeout=2";
        $pg_r = @pg_connect($dsn);
        if ($pg_r) {
            // $1 is a PG placeholder, not PHP interpolation (vars can't start
            // with a digit), so the double-quoted string is safe here.
            $sql = "SELECT user_message, assistant_response, created_at FROM wevia_conversations WHERE session_id=$1 ORDER BY created_at DESC LIMIT 10";
            $rs = @pg_query_params($pg_r, $sql, [$session]);
            if ($rs) {
                while ($row = pg_fetch_assoc($rs)) {
                    $history_msgs[] = $row;
                }
                $history_count = count($history_msgs);
            }
            pg_close($pg_r);
        }
    } catch (Throwable $e) {
        // fail soft: missing memory must never break the stream
    }
}
emit('memory', [
    'scope' => $memory_scope,
    'loaded' => $history_count,
    'persistent' => $memory_scope === 'persistent',
]);
// ── PHASE 4 · EXECUTE (real backend call over loopback HTTP) ────────
$t4 = microtime(true);
emit('execute', ['status' => 'calling_backend', 'backend' => $backend]);

$body = json_encode(['message' => $message, 'session' => $session]);
$ctx = stream_context_create([
    'http' => [
        'method' => 'POST',
        // Host header forced so the vhost resolves while dialing 127.0.0.1.
        'header' => "Content-Type: application/json\r\nHost: weval-consulting.com\r\n",
        'content' => $body,
        'timeout' => 25,
        'ignore_errors' => true, // keep 4xx/5xx bodies instead of false
    ]
]);

$backend_resp = @file_get_contents('http://127.0.0.1' . $backend, false, $ctx);
$backend_data = @json_decode($backend_resp, true);

// Deep-dig text extraction: backends disagree on the response field name,
// so probe the known candidates in priority order.
$text = '';
if ($backend_data) {
    $text = $backend_data['final_response']
        ?? $backend_data['text']
        ?? $backend_data['response']
        ?? $backend_data['answer']
        ?? $backend_data['reply']
        ?? $backend_data['message']
        ?? $backend_data['thinking']
        ?? '';
    if (is_array($text)) $text = json_encode($text, JSON_UNESCAPED_UNICODE);
}

// Some backends answer in SSE format themselves: collect their data chunks.
// (str_contains replaces the strpos(...) !== false idiom, PHP 8+.)
if (!$text && str_contains((string)$backend_resp, 'data:')) {
    preg_match_all('/data:\s*(\{[^\n]+\})/', $backend_resp, $m);
    $chunks = [];
    foreach ($m[1] ?? [] as $chunk) {
        $cd = @json_decode($chunk, true);
        if (!empty($cd['text'])) $chunks[] = $cd['text'];
    }
    $text = implode("\n", $chunks);
}

// A usable answer is non-empty and longer than a bare status word.
$backend_ok = !empty($text) && strlen($text) > 10;

emit('execute', [
    'status' => 'complete',
    'backend_ok' => $backend_ok,
    'response_length' => strlen($text),
    'duration_ms' => round((microtime(true) - $t4) * 1000, 1),
]);
// ── PHASE 5 · TESTS (heuristic quality checks on the response text) ─
$t5 = microtime(true);
$head200 = substr($text, 0, 200);
$head300 = substr($text, 0, 300);
$tests = [
    'has_response'      => $backend_ok,
    'no_error'          => !preg_match('/\berror\b|\bfailed\b|\bexception\b/i', $head200),
    'not_simulated'     => !preg_match('/simulat(ed|ion)|mock|fake|placeholder/i', $head300),
    'not_hallucinating' => !preg_match('/\b(je ne sais pas|imagine|hypothetical|suppose que|probablement|might be)\b/i', $head300),
    'has_natural_lang'  => preg_match('/\b(le|la|les|un|je|vous|nous|est|sont|the|is|are)\b/i', $head200) > 0,
];
// Count of true entries (equivalent to summing intval over the booleans).
$passed = count(array_filter($tests));

emit('tests', [
    'passed' => $passed,
    'total' => count($tests),
    'details' => $tests,
    'duration_ms' => round((microtime(true) - $t5) * 1000, 1),
]);
// ── PHASE 6 · RESPONSE (full extracted text in a single frame) ──────
emit('response', [
    'text' => $text,
    'length' => strlen($text),
]);
// ── PHASE 7 · CRITIQUE (self-assessment notes + quality grade) ──────
// Derive the denominator from the actual test set instead of the literal
// 5 hard-coded in four places, so adding/removing a heuristic test above
// keeps the score, the grade and the notes consistent automatically.
$tests_total = count($tests);
$notes = [];
if ($passed < $tests_total) $notes[] = "Some tests failed ({$passed}/{$tests_total})";
if (strlen($text) < 30) $notes[] = "Short response";
if ((microtime(true) - $t4) > 10) $notes[] = "Slow response";
if (!$notes) $notes[] = "Quality OK";

emit('critique', [
    'quality_score' => $passed / $tests_total,
    // all passed → EXCELLENT; one miss → GOOD; two misses → OK; else LOW
    'quality' => $passed === $tests_total ? 'EXCELLENT'
        : ($passed >= $tests_total - 1 ? 'GOOD'
        : ($passed >= $tests_total - 2 ? 'OK' : 'LOW')),
    'notes' => $notes,
]);
// ── MEMORY SAVE (wevia_conversations · persistent scope only) ───────
// Stores the turn for cross-session recall by internal chatbots. Public
// (session-scope) chatbots never reach this branch.
if ($memory_scope === 'persistent' && $backend_ok && $session) {
    $saved = false;
    try {
        $pgconn = @pg_connect("host=127.0.0.1 dbname=adx_system user=admin password=admin123 connect_timeout=2");
        if ($pgconn) {
            $latency = (int)round((microtime(true) - $t1) * 1000);
            $ins = @pg_query_params(
                $pgconn,
                "INSERT INTO wevia_conversations(session_id, user_message, assistant_response, intent, provider, latency_ms) VALUES ($1, $2, $3, $4, $5, $6)",
                [$session, mb_substr($message, 0, 2000), mb_substr($text, 0, 4000), $intent, $chatbot, $latency]
            );
            // FIX: report saved=true only when the INSERT actually succeeded
            // (previously set unconditionally once a connection existed).
            $saved = ($ins !== false);
            pg_close($pgconn);
        }
    } catch (Throwable $e) {
        // fail soft: persistence failure must never break the stream
    }
    emit('memory_saved', ['saved' => $saved, 'session' => $session]);
}
// ── LEARNING LOG (ai_learning_log · ALL chatbots) ───────────────────
// Public chatbots run with session scope, so session_id is deliberately
// omitted from their experience payload (anonymized learning).
try {
    $pgL = @pg_connect("host=127.0.0.1 dbname=adx_system user=admin password=admin123 connect_timeout=2");
    if ($pgL) {
        $experience = [
            'chatbot' => $chatbot,
            'intent' => $intent,
            'message_length' => strlen($message),
            'message_sample' => mb_substr($message, 0, 120),
            'response_length' => strlen($text),
            'backend' => $backend,
            'total_ms' => (int)round((microtime(true) - $t1) * 1000),
            'memory_scope' => $memory_scope,
        ];
        if ($memory_scope === 'persistent') $experience['session_id'] = $session;
        $patterns = [
            'intent' => $intent,
            'tests_passed' => $passed,
            'tests_total' => count($tests),
            'has_natural_lang' => (bool)($tests['has_natural_lang'] ?? false),
            'not_hallucinating' => (bool)($tests['not_hallucinating'] ?? false),
            'backend_ok' => $backend_ok,
        ];
        $outcome_success = ($passed >= 4) && $backend_ok;
        $ins = @pg_query_params(
            $pgL,
            "INSERT INTO ai_learning_log(experience, patterns_extracted, outcome_success) VALUES ($1, $2, $3)",
            [
                json_encode($experience, JSON_UNESCAPED_UNICODE),
                // FIX: JSON_UNESCAPED_UNICODE on patterns too, consistent
                // with the experience payload (was plain json_encode).
                json_encode($patterns, JSON_UNESCAPED_UNICODE),
                $outcome_success ? 't' : 'f',
            ]
        );
        pg_close($pgL);
        // FIX: report the actual INSERT result instead of hard-coded true.
        emit('learned', ['logged' => $ins !== false, 'outcome' => $outcome_success ? 'success' : 'partial']);
    }
} catch (Throwable $e) {
    // fail soft: learning must never break the stream
}
// ── DONE (terminal frame; client closes the EventSource on this) ────
emit('done', [
    'total_duration_ms' => round((microtime(true) - $t1) * 1000, 1),
    'chatbot' => $chatbot,
    'session' => $session,
    'memory_scope' => $memory_scope,
    'history_loaded' => $history_count,
]);