Files
weval-l99/wevia-cortex.py

234 lines
9.1 KiB
Python

#!/usr/bin/env python3
"""WEVIA CORTEX v3.0 — Autonomous Infrastructure Brain
Runs every 4h via cron. Checks EVERYTHING, analyzes via Cerebras 235B, reports Mattermost."""
import json, os, ssl, subprocess, urllib.request, time, socket
from datetime import datetime

# NOTE(review): this disables TLS certificate verification PROCESS-WIDE, for
# every HTTPS request this script makes (including the Cerebras API call) —
# confirm this is intentional and not a workaround for a single self-signed
# endpoint; if so, scope an SSLContext to that one request instead.
ssl._create_default_https_context = ssl._create_unverified_context
# Mattermost incoming-webhook URL for the report channel.
# NOTE(review): the webhook token is hard-coded; consider moving it to
# /etc/weval/secrets.env alongside the Cerebras key.
MATTERMOST = "http://localhost:8065/hooks/pt54hzthf3b6pe6rgp1ionipnh"
# Filled in at runtime by load_cerebras_key(); empty string means "no key".
CEREBRAS_KEY = ""
LOG = "/var/log/wevia-cortex.log"


def log(msg):
    """Print *msg* with a HH:MM:SS prefix and best-effort append it to LOG.

    Logging must never take the monitor down, so file errors (missing dir,
    permissions, full disk) are ignored — but only OSError is swallowed now,
    not every exception (the old bare `except:` also ate KeyboardInterrupt
    and SystemExit).
    """
    line = f"[{datetime.now().strftime('%H:%M:%S')}] {msg}"
    print(line)
    try:
        with open(LOG, "a") as f:
            f.write(line + "\n")
    except OSError:
        pass
def http_get(url, timeout=10):
    """GET *url* and return the body decoded as UTF-8 (bad bytes replaced),
    or None on any network/URL failure. Best-effort by design.

    Fixes vs. original: the response object is closed via `with` (it leaked
    before), and the bare `except:` is narrowed — urllib.error.URLError is an
    OSError subclass, so (OSError, ValueError) covers DNS/connect/timeout
    failures as well as malformed URLs, without hiding programming errors.
    """
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            return resp.read().decode('utf-8', errors='replace')
    except (OSError, ValueError):
        return None
def http_post(url, data, timeout=15):
    """POST *data* as a JSON body to *url* and return the parsed JSON
    response, or None on any network/URL/decoding failure.

    Fixes vs. original: the response is closed via `with`, and the bare
    `except:` is narrowed — URLError is an OSError subclass and
    json.JSONDecodeError is a ValueError, so (OSError, ValueError) covers
    the expected failure modes without swallowing everything.
    """
    try:
        req = urllib.request.Request(url, json.dumps(data).encode(),
                                     headers={"Content-Type": "application/json"})
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            return json.loads(resp.read())
    except (OSError, ValueError):
        return None
def cmd(c, timeout=10):
    """Run shell command *c* and return its stripped stdout, or "" on
    failure. stderr is captured but deliberately discarded.

    The bare `except:` is narrowed: TimeoutExpired is a SubprocessError,
    and OSError covers a missing/unlaunchable shell. Anything else is a
    programming error and should surface.
    """
    try:
        r = subprocess.run(c, shell=True, capture_output=True, text=True, timeout=timeout)
        return r.stdout.strip()
    except (subprocess.SubprocessError, OSError):
        return ""
def load_cerebras_key():
    """Populate the module-global CEREBRAS_KEY from the first matching line
    of /etc/weval/secrets.env; leave it unchanged if the file is unreadable.

    Fixes vs. original: the file handle is closed via `with` (it was left to
    the GC before), `startswith` takes the prefix tuple directly, and the
    bare `except:` is narrowed to OSError (missing file / permissions).
    """
    global CEREBRAS_KEY
    prefixes = ("CEREBRAS_KEY_2=", "CEREBRAS_KEY2=", "CEREBRAS_API_KEY=")
    try:
        with open("/etc/weval/secrets.env") as f:
            for line in f:
                if line.startswith(prefixes):
                    CEREBRAS_KEY = line.strip().split("=", 1)[1]
                    return
    except OSError:
        pass
def cerebras_analyze(prompt):
    """Send *prompt* to the Cerebras chat-completions endpoint and return
    the first choice's message content; return None (after logging) when no
    key is loaded or the request/parse fails.
    """
    if not CEREBRAS_KEY:
        return None
    try:
        payload = json.dumps({
            "model": "qwen-3-235b-a22b-instruct-2507",
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": 500,
            "temperature": 0.3,
        }).encode()
        request = urllib.request.Request(
            "https://api.cerebras.ai/v1/chat/completions",
            payload,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {CEREBRAS_KEY}",
            },
        )
        response = urllib.request.urlopen(request, timeout=30)
        body = json.loads(response.read())
        return body["choices"][0]["message"]["content"]
    except Exception as e:
        log(f"Cerebras error: {e}")
        return None
def main():
    """Run one full CORTEX cycle.

    Gathers ~10 infrastructure checks into *report*, asks Cerebras (with a
    local-Ollama fallback) for priority actions, posts a markdown summary to
    the Mattermost webhook, and writes the report to a JSON file served by
    the web API. Every check is individually best-effort: a failure records
    "unavailable"/empty values but never aborts the cycle.
    """
    log("=" * 60)
    log("WEVIA CORTEX v3.0 — Full Autonomous Brain")
    log("=" * 60)
    load_cerebras_key()
    report = {"timestamp": datetime.now().strftime('%Y-%m-%d %H:%M'), "checks": {}}
    # ═══ 1. REGISTRY ═══
    # The agent registry is a local PHP script expected to print JSON on stdout.
    registry = None
    try:
        r = subprocess.run(["php", "/opt/weval-l99/registry-local.php"],
                           capture_output=True, text=True, timeout=30)
        if r.stdout:
            parsed = json.loads(r.stdout.strip())
            # Normalize non-dict payloads so the .get() calls below are safe.
            registry = parsed if isinstance(parsed, dict) else {"data": parsed}
    except Exception as e:
        log(f"Registry error: {e}")
    if registry:
        agents = registry.get("agents", {}).get("total", "?") if isinstance(registry.get("agents"), dict) else "?"
        report["checks"]["registry"] = f"{agents} agents"
        log(f"Registry: {agents} agents")
    else:
        report["checks"]["registry"] = "unavailable"
        log("Registry unavailable")
    # ═══ 2. SYSTEM HEALTH ═══
    # All values are raw strings straight from shell pipelines (see cmd()).
    docker_count = cmd("docker ps | tail -n+2 | wc -l")
    ram = cmd("free -h | grep Mem | awk '{print $3\"/\"$2}'")
    disk = cmd("df -h / | tail -1 | awk '{print $5}'")
    load = cmd("cat /proc/loadavg | awk '{print $1}'")
    uptime = cmd("uptime -p")
    report["checks"]["system"] = {"docker": docker_count, "ram": ram, "disk": disk, "load": load, "uptime": uptime}
    log(f"System: Docker={docker_count} RAM={ram} Disk={disk} Load={load}")
    # ═══ 3. SERVICES ═══
    # A service counts as alive only if its endpoint answers with a non-empty body.
    # NOTE(review): "chatbot" and "ollama" probe the same URL (port 11434) —
    # confirm that is intentional and not a stale entry.
    services = {}
    svc_list = [
        ("chatbot", "http://127.0.0.1:11434/"),
        ("streaming", "http://127.0.0.1:6333/collections"),  # Qdrant = streaming dep
        ("deerflow", "http://127.0.0.1:2024/ok"),
        ("paperclip", "http://127.0.0.1:3100/"),
        ("ollama", "http://127.0.0.1:11434/"),
        ("searxng", "http://127.0.0.1:8080/"),
        ("mattermost", "http://127.0.0.1:8065/api/v4/system/ping"),
        ("loki", "http://127.0.0.1:3200/ready"),
        ("plausible", "http://127.0.0.1:8000/"),
        ("qdrant", "http://127.0.0.1:6333/collections"),
    ]
    ok_count = 0
    for name, url in svc_list:
        r = http_get(url, timeout=5)
        alive = r is not None and len(r) > 0
        services[name] = "ok" if alive else "DOWN"
        if alive: ok_count += 1
    report["checks"]["services"] = f"{ok_count}/{len(svc_list)} OK"
    log(f"Services: {ok_count}/{len(svc_list)} OK")
    down = [k for k,v in services.items() if v == "DOWN"]
    if down: log(f"DOWN: {', '.join(down)}")
    # ═══ 4. NONREG ═══
    # Non-regression score from the public API; `or "{}"` guards a None body.
    try:
        nr = json.loads(http_get("https://weval-consulting.com/api/nonreg-api.php?cat=all") or "{}")
        report["checks"]["nonreg"] = f"{nr.get('pass','?')}/{nr.get('total','?')} ({nr.get('score','?')}%)"
        log(f"NonReg: {nr.get('pass','?')}/{nr.get('total','?')} ({nr.get('score','?')}%)")
    except:  # NOTE(review): bare except — narrow to (ValueError, OSError)
        report["checks"]["nonreg"] = "unavailable"
    # ═══ 5. QDRANT ═══
    # Sum vector counts across the known collections; missing ones add 0.
    total_vectors = 0
    for coll in ['weval_skills', 'wevia_kb', 'wevia_memory', 'wevia_learnings']:
        try:
            d = json.loads(http_get(f"http://127.0.0.1:6333/collections/{coll}") or "{}")
            total_vectors += d.get("result", {}).get("points_count", 0)
        except: pass  # NOTE(review): bare except — narrow to ValueError
    report["checks"]["qdrant"] = f"{total_vectors} vectors"
    log(f"Qdrant: {total_vectors} vectors")
    # ═══ 6. CRONS ═══
    cron_count = cmd("ls /etc/cron.d/weval-* 2>/dev/null | wc -l")
    report["checks"]["crons"] = f"{cron_count} active"
    # ═══ 7. ROUTES ═══
    # Counts "// Route" marker comments in the API dispatcher source.
    routes = cmd("grep -c '// Route' /var/www/html/api/weval-ia-fast.php 2>/dev/null")
    report["checks"]["routes"] = routes
    # ═══ 8. DATASET ═══
    dataset = cmd("wc -l < /opt/wevia-brain/training-data/weval-merged-dataset.jsonl 2>/dev/null")
    report["checks"]["dataset"] = f"{dataset} pairs"
    # ═══ 9. WIKI ═══
    wiki = cmd("ls /opt/weval-l99/wiki/*.json 2>/dev/null | wc -l")
    report["checks"]["wiki"] = f"{wiki} entries"
    # ═══ 10. ENTERPRISE ═══
    # Scrapes rm:'...' markers out of the enterprise-model page source.
    try:
        import re
        ec = open("/var/www/html/enterprise-model.html").read()
        total_agents = len(re.findall(r"rm:'([^']+)'", ec))
        dorm = ec.count("rm:'dorm'")
        dead = ec.count("rm:'dead'")
        report["checks"]["enterprise"] = f"{total_agents} agents (dorm={dorm} dead={dead})"
    except:  # NOTE(review): bare except — narrow to OSError
        report["checks"]["enterprise"] = "unavailable"
    # ═══ 11. CEREBRAS AI ANALYSIS ═══
    # Prompt is French on purpose (the team's reporting language); the
    # continuation lines below must stay unindented — they are string content.
    summary = json.dumps(report["checks"], indent=2)
    prompt = f"""Tu es WEVIA CORTEX, le cerveau autonome de WEVAL Consulting.
Voici l'état actuel du système:
{summary}
Services DOWN: {', '.join(down) if down else 'AUCUN'}
Donne exactement 3 actions prioritaires à prendre. Sois concis (3 lignes max)."""
    analysis = cerebras_analyze(prompt)
    if analysis:
        report["analysis"] = analysis
        log(f"Cerebras analysis: {len(analysis)} chars")
    else:
        # Fallback Ollama
        try:
            data = {"model":"weval-brain-v3","prompt":prompt,"stream":False}
            req = urllib.request.Request("http://127.0.0.1:11434/api/generate",
                                         json.dumps(data).encode(), headers={"Content-Type":"application/json"})
            r = urllib.request.urlopen(req, timeout=60)
            d = json.loads(r.read())
            report["analysis"] = d.get("response","")[:500]
            log(f"Ollama analysis: {len(report.get('analysis',''))} chars")
        except Exception as e:
            log(f"Analysis error: {e}")
            report["analysis"] = "Analyse indisponible"
    # ═══ 12. MATTERMOST REPORT ═══
    # 9+/10 services up = green, 7-8 = yellow, below = red.
    emoji = "🟢" if ok_count >= 9 else ("🟡" if ok_count >= 7 else "🔴")
    mm_text = f"""{emoji} **WEVIA CORTEX v3.0** — {report['timestamp']}
**Services:** {report['checks'].get('services','')}
**NonReg:** {report['checks'].get('nonreg','')}
**System:** Docker={docker_count} RAM={ram} Disk={disk} Load={load}
**Qdrant:** {total_vectors} vectors | **Routes:** {routes} | **Dataset:** {dataset} pairs
**Wiki:** {wiki} entries | **Crons:** {cron_count}
**Enterprise:** {report['checks'].get('enterprise','')}
{'**DOWN:** ' + ', '.join(down) if down else ''}
---
**Analyse IA:** {report.get('analysis','N/A')[:300]}"""
    try:
        req = urllib.request.Request(MATTERMOST, json.dumps({"text": mm_text}).encode(),
                                     headers={"Content-Type":"application/json"})
        urllib.request.urlopen(req, timeout=10)
        log("Mattermost report sent ✅")
    except Exception as e:
        log(f"Mattermost error: {e}")
    # ═══ 13. SAVE JSON REPORT ═══
    # Written where the web API can serve it (cortex-report.json).
    try:
        with open("/var/www/html/api/cortex-report.json", "w") as f:
            json.dump(report, f, indent=2, ensure_ascii=False)
        log("JSON report saved")
    except: pass  # NOTE(review): bare except — narrow to OSError
    log("CORTEX v3.0 DONE ✅")
# Entry point with a top-level crash barrier: a cron-driven script should
# always leave a visible traceback in its captured output rather than die
# silently. The __main__ guard (missing before) lets the module be imported
# — e.g. by tests or other tools — without triggering a full CORTEX cycle.
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        print(f"CORTEX CRASHED: {e}")
        import traceback; traceback.print_exc()