Files
wevia-brain/async-llm-worker.py
Yanis Mahboub a736d5d9b2 Wave 114 auto
2026-04-14 23:58:39 +02:00

32 lines
1.4 KiB
Python

#!/usr/bin/env python3
"""WEVAL Async LLM Worker - Redis queue -> Sovereign API"""
import redis
import json
import time
import urllib.request

# Client for the local Redis instance; decode_responses=True makes brpop
# return str payloads so json.loads can consume them directly.
r = redis.Redis(host='127.0.0.1', port=6379, decode_responses=True)
def call_sovereign(prompt, max_tokens=150):
    """POST *prompt* to the local Sovereign chat-completions API.

    Parameters:
        prompt: user message, forwarded verbatim as the chat content.
        max_tokens: completion-length cap passed through to the API.

    Returns:
        dict with exactly two keys:
            "provider" -- upstream provider name (or "?" if absent,
                          or "error" on any failure),
            "text"     -- completion text, or the error message.
        Never raises: any failure is folded into an "error" result so the
        worker loop keeps running.
    """
    try:
        data = json.dumps({
            "model": "auto",
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": max_tokens,
        }).encode()
        req = urllib.request.Request(
            "http://127.0.0.1:4000/v1/chat/completions",
            data=data,
            headers={"Content-Type": "application/json"},
        )
        # Fix: context manager closes the HTTP response even if json.loads
        # or the key lookups below raise (the original leaked the socket).
        with urllib.request.urlopen(req, timeout=15) as resp:
            d = json.loads(resp.read().decode())
        return {"provider": d.get("provider", "?"),
                "text": d["choices"][0]["message"]["content"]}
    except Exception as e:
        # Deliberate catch-all: this worker is best-effort and must report
        # failures as results rather than crash the loop.
        return {"provider": "error", "text": str(e)}
print("[WORKER] Listening...")
# Main daemon loop: block on the Redis queue, run each job through the
# Sovereign API, then store and broadcast the result.
while True:
    try:
        entry = r.brpop("wevia:llm:queue", timeout=30)
        if not entry:
            # brpop timed out with nothing queued; just poll again.
            continue
        _, payload = entry
        job = json.loads(payload)
        job_id = job.get("id", "?")
        result = call_sovereign(job.get("prompt", ""), job.get("max_tokens", 150))
        result["job_id"] = job_id
        serialized = json.dumps(result)
        # Persist for pollers (5-minute TTL) and notify any live subscriber.
        r.set(f"wevia:llm:results:{job_id}", serialized, ex=300)
        r.publish(f"wevia:llm:done:{job_id}", serialized)
        print(f"[WORKER] {job_id}: {result['provider']}")
    except Exception as e:
        # Top-level boundary: log and back off briefly so a transient
        # Redis/parse error cannot spin or kill the daemon.
        print(f"[WORKER] Error: {e}")
        time.sleep(1)