Files
html/top-ia/dialectical.sh
2026-04-16 23:31:21 +02:00

29 lines
1.5 KiB
Bash
Executable File

#!/bin/bash
# Dialectical reasoning: two agents argue pro and con, a third arbitrates.
# Usage:   dialectical.sh <question ...>
# Output:  one JSON object on stdout: {question, pro, con, arbiter}
# Env:     GROQ_KEY, CEREBRAS_API_KEY, NVIDIA_NIM_KEY (read from secrets file)
set -euo pipefail

Q="$*"
[ -z "$Q" ] && { echo '{"error":"need question"}'; exit 1; }

# The secrets file is optional (missing keys just produce "ERR:" answers),
# so guard its existence instead of blanket-suppressing errors; a file that
# exists but fails to source is a real error and aborts under 'set -e'.
# shellcheck disable=SC1091
[ -r /etc/weval/secrets.env ] && source /etc/weval/secrets.env

# Exporting an unset name is a no-op until assigned; Python reads the keys
# with os.environ.get(..., '') so absent keys degrade gracefully.
export Q GROQ_KEY CEREBRAS_API_KEY NVIDIA_NIM_KEY
python3 <<'PY'
import os, json, urllib.request

# Q is guaranteed non-empty by the shell wrapper above.
q = os.environ['Q']
def ask(provider, url, key, model, prompt):
    """POST a single-user-message chat completion to an OpenAI-compatible
    endpoint and return the reply text, truncated to 400 chars.

    provider is a label only (kept for call-site readability/debugging).
    Best-effort by design: any failure (bad URL, network error, HTTP error,
    malformed/empty JSON) yields a short "ERR: ..." string instead of raising,
    so the arbiter step and the final JSON output always run.
    """
    try:
        body = json.dumps({
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": 180,
        }).encode()
        req = urllib.request.Request(url, data=body, headers={
            "Authorization": "Bearer " + key,
            "Content-Type": "application/json",
        })
        # Context manager closes the HTTP response deterministically
        # (the original leaked the socket until GC).
        with urllib.request.urlopen(req, timeout=15) as resp:
            d = json.loads(resp.read())
        # Defensive chain: missing/empty 'choices' or 'message' falls through
        # to '' or raises IndexError, which the except below converts to ERR.
        return d.get('choices', [{}])[0].get('message', {}).get('content', '')[:400]
    except Exception as e:
        return f"ERR: {str(e)[:60]}"
# One endpoint per role so a provider outage cannot bias the whole debate.
PRO_URL = "https://api.groq.com/openai/v1/chat/completions"
CON_URL = "https://integrate.api.nvidia.com/v1/chat/completions"
ARB_URL = "https://api.cerebras.ai/v1/chat/completions"

# Advocate: builds the strongest case FOR the question.
pro = ask("groq", PRO_URL, os.environ.get('GROQ_KEY', ''),
          "llama-3.1-8b-instant",
          f"Defend this position with strong arguments (max 100 words): {q}")

# Opponent: builds the strongest case AGAINST it.
con = ask("nvidia", CON_URL, os.environ.get('NVIDIA_NIM_KEY', ''),
          "meta/llama-3.1-8b-instruct",
          f"Argue AGAINST this position with strong counter-arguments (max 100 words): {q}")

# Arbiter: sees both sides and synthesizes; must run after pro and con.
arb = ask("cerebras", ARB_URL, os.environ.get('CEREBRAS_API_KEY', ''),
          "llama3.1-8b",
          f"PRO: {pro}\n\nCON: {con}\n\nAs neutral arbiter, synthesize the most nuanced position (max 100 words):")

print(json.dumps({"question": q, "pro": pro, "con": con, "arbiter": arb}, ensure_ascii=False))
PY