#!/usr/bin/env python3
"""WEVIA AGENTS PACK v1.0

4 autonomous agents:
1. Responsive Agent — mobile/tablet/desktop visual tests
2. SSO Auth Agent — Authentik regression detection + auto-fix
3. API Key Renewal Agent — expired keys detection + renewal
4. Model Updater Agent — Ollama model freshness + GPU free resources

Cron: */30
"""
import glob
import json
import os
import shutil
import subprocess as sp
import time
from datetime import datetime, timedelta
from pathlib import Path
# Filesystem locations and base URL used by all four agents.
LOG = "/var/log/wevia-agents-pack.log"  # append-only run log (see lg())
BD = Path("/opt/weval-l99")  # base deployment directory
SS = Path("/var/www/html/l99-screenshots")  # screenshot output directory
VID = Path("/var/www/html/l99-videos")  # video output directory (recordings land in VID/tmp first)
STATUS = "/var/www/html/api/wevia-agents-pack-status.json"  # JSON status file written at the end of the run
B = "https://weval-consulting.com"  # base URL of the site under test
SSO = str(BD / "sso-state.json")  # SSO state file path (not referenced elsewhere in this chunk)

# Make sure every output directory exists before the agents start.
for d in [SS, VID, VID/"tmp"]: d.mkdir(parents=True, exist_ok=True)

ts = datetime.now()  # single timestamp shared by the whole run
results = {"timestamp": ts.isoformat(), "agents": {}}  # aggregated per-agent results, dumped to STATUS
def lg(m):
    """Log message ``m`` to stdout and append it to LOG, prefixed with HH:MM:SS."""
    stamp = datetime.now().strftime('%H:%M:%S')
    line = f"[{stamp}] {m}"
    print(line, flush=True)
    with open(LOG, "a") as fh:
        fh.write(line + "\n")
def cmd(c, t=10):
    """Run shell command ``c`` with a ``t``-second timeout.

    Returns the command's stripped stdout, or "" on timeout or execution
    failure. Non-zero exit status is NOT treated as failure: whatever the
    command printed to stdout is still returned.
    """
    try:
        r = sp.run(c, shell=True, capture_output=True, text=True, timeout=t)
        return r.stdout.strip()
    # Previously a bare `except:` — that also swallowed KeyboardInterrupt and
    # SystemExit, making the script hard to stop. Catch only execution errors.
    except (sp.TimeoutExpired, sp.SubprocessError, OSError):
        return ""
lg("=" * 50)
lg(f"AGENTS PACK — {ts}")

# ═══════════════════════════════════════════
# AGENT 1: RESPONSIVE — 5 viewports visual test
# ═══════════════════════════════════════════
lg("═══ AGENT 1: RESPONSIVE ═══")
resp = {"status": "GREEN", "tests": [], "fixes": []}

try:
    # Imported lazily so the other agents still run when Playwright is absent.
    from playwright.sync_api import sync_playwright
    with sync_playwright() as pw:
        br = pw.chromium.launch(headless=True, args=["--no-sandbox","--ignore-certificate-errors"])

        # (name, width, height, user-agent); a UA of None keeps Chromium's default.
        viewports = [
            ("iphone", 375, 812, "Mozilla/5.0 (iPhone; CPU iPhone OS 16_0 like Mac OS X)"),
            ("android", 412, 915, "Mozilla/5.0 (Linux; Android 13; Pixel 7)"),
            ("ipad", 768, 1024, "Mozilla/5.0 (iPad; CPU OS 16_0 like Mac OS X)"),
            ("desktop", 1280, 720, None),
            ("wide", 1920, 1080, None),
        ]

        pages_to_test = ["/", "/use-cases.html", "/wevia.html", "/blog.html"]

        for vp_name, w, h, ua in viewports:
            # One browser context per viewport; every page in it is video-recorded.
            ctx_args = {"viewport": {"width": w, "height": h}, "ignore_https_errors": True,
                        "record_video_dir": str(VID / "tmp")}
            if ua: ctx_args["user_agent"] = ua
            ctx = br.new_context(**ctx_args)

            for pg in pages_to_test:
                page = ctx.new_page()
                try:
                    page.goto(f"{B}{pg}", timeout=12000)
                    page.wait_for_timeout(2000)  # let late JS / layout settle
                    bl = len(page.content())  # rendered HTML size, used as a content sanity check

                    # Check key elements
                    checks = {
                        "logo": page.locator("img,.logo,svg,header").first.count() > 0,
                        "content": bl > 5000,  # a real page should render more than ~5 KB of HTML
                        "no_overflow": page.evaluate("() => document.body.scrollWidth <= window.innerWidth + 5"),
                    }

                    # Mobile-specific checks
                    if w < 768:
                        checks["hamburger"] = page.locator(".mobile-toggle").count() > 0
                        # Test hamburger click
                        btn = page.locator(".mobile-toggle")
                        if btn.count() > 0:
                            btn.click()
                            page.wait_for_timeout(600)
                            # The menu counts as open when its inline `left` style is exactly "0px".
                            left = page.evaluate('() => {var n=document.querySelector(".mobile-navigation");return n?n.style.left:"?"}')
                            checks["menu_opens"] = left == "0px"
                            # Close it
                            page.evaluate('() => {var n=document.querySelector(".mobile-navigation");if(n)n.style.left="-100%"}')

                    all_ok = all(checks.values())
                    pg_name = pg.replace("/","").replace(".html","") or "homepage"
                    snap_name = f"resp-{vp_name}-{pg_name}"
                    page.screenshot(path=str(SS / f"{snap_name}.png"))

                    resp["tests"].append({
                        "viewport": vp_name, "page": pg, "ok": all_ok,
                        "checks": {k: v for k, v in checks.items()},
                        "body": bl
                    })
                    if not all_ok:
                        resp["status"] = "AMBER"
                        failed = [k for k, v in checks.items() if not v]
                        lg(f" {vp_name} {pg}: FAIL {failed}")

                except Exception as e:
                    resp["tests"].append({"viewport": vp_name, "page": pg, "ok": False, "error": str(e)[:40]})
                    resp["status"] = "RED"
                finally:
                    # Rename the recorded video to match the screenshot name.
                    # NOTE(review): if the test failed before snap_name was assigned,
                    # the move below raises NameError (or reuses the previous
                    # iteration's snap_name) — silently swallowed by the bare
                    # except; worth confirming/fixing upstream.
                    try:
                        vp_vid = page.video.path() if page.video else None
                        page.close()  # closing the page finalizes the video file on disk
                        if vp_vid and Path(vp_vid).exists():
                            shutil.move(str(vp_vid), str(VID / f"{snap_name}.webm"))
                    except: pass

            ctx.close()
        br.close()
except Exception as e:
    # Playwright missing or the whole browser session failed — mark the agent RED.
    resp["status"] = "RED"
    resp["error"] = str(e)[:80]
    lg(f" Playwright error: {e}")

pass_count = sum(1 for t in resp["tests"] if t.get("ok"))
total_count = len(resp["tests"])
lg(f" Responsive: {pass_count}/{total_count} ({resp['status']})")
results["agents"]["responsive"] = resp
# ═══════════════════════════════════════════
# AGENT 2: SSO AUTH — check + auto-fix
# ═══════════════════════════════════════════
lg("═══ AGENT 2: SSO AUTH ═══")
auth = {"status": "GREEN", "checks": [], "fixes": []}

# Authentik containers: verify each is running; restart any that is not.
containers = ["authentik-server", "authentik-worker", "authentik-db", "authentik-redis"]
for container in containers:
    state = cmd(f"docker inspect -f '{{{{.State.Status}}}}' {container} 2>/dev/null")
    running = "running" in state
    auth["checks"].append({"name": container, "ok": running})
    if running:
        continue
    lg(f" {container} DOWN — restarting...")
    cmd(f"docker restart {container}", 15)
    auth["fixes"].append(f"Restarted {container}")
    auth["status"] = "AMBER"

# The Authentik outpost must answer on its local HTTPS port.
outpost = cmd("curl -sf --max-time 3 https://127.0.0.1:9443/ -k -o /dev/null -w '%{http_code}'")
auth["checks"].append({"name": "outpost_https", "ok": outpost in ["200", "302", "301"]})

# Every SSO-protected domain should answer 200 or redirect to login.
sso_domains = ["monitor.weval-consulting.com", "crm.weval-consulting.com",
               "analytics.weval-consulting.com", "mm.weval-consulting.com", "n8n.weval-consulting.com"]
for domain in sso_domains:
    http = cmd(f"curl -sk -o /dev/null -w '%{{http_code}}' https://{domain}/ --max-time 5")
    reachable = http in ["200", "302", "301"]
    auth["checks"].append({"name": domain, "ok": reachable})
    if not reachable:
        auth["status"] = "AMBER"
        lg(f" SSO domain {domain}: HTTP {http}")

# Auth callback endpoint: any HTTP answer counts; "000" means no connection at all.
cb = cmd("curl -sk -o /dev/null -w '%{http_code}' https://weval-consulting.com/outpost.goauthentik.io/ping --max-time 3")
auth["checks"].append({"name": "callback_ping", "ok": cb != "000"})

# Nginx config must still parse (auth locations live there).
nginx_ok = "successful" in cmd("nginx -t 2>&1")
auth["checks"].append({"name": "nginx_syntax", "ok": nginx_ok})

pass_auth = sum(1 for c in auth["checks"] if c["ok"])
lg(f" SSO Auth: {pass_auth}/{len(auth['checks'])} ({auth['status']})")
results["agents"]["sso_auth"] = auth
# ═══════════════════════════════════════════
# AGENT 3: API KEY RENEWAL
# ═══════════════════════════════════════════
lg("═══ AGENT 3: API KEY RENEWAL ═══")
keys = {"status": "GREEN", "checks": [], "alerts": []}

# GitHub PAT: a valid token makes /user return the caller's JSON ("login": ...).
try:
    pat = cmd("grep GH_PAT /etc/weval/secrets.env | cut -d= -f2")
    if pat:
        r = cmd(f"curl -sf -H 'Authorization: token {pat}' https://api.github.com/user --max-time 5")
        if "login" in r:
            keys["checks"].append({"name": "github_pat", "ok": True, "detail": "valid"})
        else:
            keys["checks"].append({"name": "github_pat", "ok": False, "detail": "invalid/expired"})
            keys["alerts"].append("GitHub PAT expired! Renew at github.com/settings/tokens")
            keys["status"] = "RED"
except Exception:  # best-effort probe — was a bare except; must not kill the run
    pass

# WhatsApp Graph token: /me only succeeds with a valid access token.
wa_token = cmd("grep WA_TOKEN /etc/weval/secrets.env | cut -d= -f2")
if wa_token and len(wa_token) > 20:
    # BUGFIX: the probe previously sent wa_token[:50] with a literal "..."
    # appended, so the token was always invalid and the check could never
    # pass. Send the full token.
    r = cmd(f"curl -sf 'https://graph.facebook.com/v18.0/me?access_token={wa_token}' --max-time 5 2>/dev/null | head -c 30")
    wa_ok = "error" not in r.lower() if r else False
    keys["checks"].append({"name": "whatsapp_token", "ok": wa_ok})
    if not wa_ok:
        # Previously an invalid (non-missing) token raised no alert at all,
        # inconsistent with the GitHub branch above.
        keys["alerts"].append("WhatsApp token expired!")
        if keys["status"] == "GREEN":
            keys["status"] = "AMBER"
else:
    keys["checks"].append({"name": "whatsapp_token", "ok": False, "detail": "missing"})
    keys["alerts"].append("WhatsApp token expired!")

# AI provider keys: name -> (env var in secrets file, auth-gated models endpoint).
providers = {
    "cerebras": ("CEREBRAS_API_KEY", "https://api.cerebras.ai/v1/models"),
    "groq": ("GROQ_API_KEY", "https://api.groq.com/openai/v1/models"),
}

# Parse the secrets file once (KEY=VALUE lines; '#'-prefixed lines ignored).
secrets = {}
try:
    for line in open("/etc/weval/secrets.env"):
        if "=" in line and not line.startswith("#"):
            k, v = line.strip().split("=", 1)
            secrets[k] = v
except OSError:  # missing/unreadable secrets file — all provider keys report "missing"
    pass

for name, (env_key, url) in providers.items():
    key = secrets.get(env_key, "")
    if key:
        r = cmd(f"curl -sf -H 'Authorization: Bearer {key}' '{url}' --max-time 5 -o /dev/null -w '%{{http_code}}'")
        ok = r in ["200", "201"]
        keys["checks"].append({"name": name, "ok": ok})
        if not ok:
            keys["alerts"].append(f"{name} API key invalid (HTTP {r})")
    else:
        keys["checks"].append({"name": name, "ok": False, "detail": "missing"})

# SSL certificate expiry is recorded for information only (never flips status).
ssl_days = cmd("openssl x509 -enddate -noout -in /etc/ssl/certs/weval-selfsigned.crt 2>/dev/null | sed 's/notAfter=//'")
keys["checks"].append({"name": "ssl_cert", "ok": True, "detail": ssl_days[:30]})

lg(f" API Keys: {sum(1 for c in keys['checks'] if c.get('ok'))}/{len(keys['checks'])} | Alerts: {len(keys['alerts'])}")
results["agents"]["api_keys"] = keys
# ═══════════════════════════════════════════
# AGENT 4: MODEL UPDATER + GPU FREE
# ═══════════════════════════════════════════
lg("═══ AGENT 4: MODEL UPDATER ═══")
models = {"status": "GREEN", "ollama": [], "gpu_free": [], "updates": []}

# Inventory local Ollama models via the /api/tags endpoint.
try:
    r = cmd("curl -sf http://127.0.0.1:11435/api/tags")
    d = json.loads(r)
    for m in d.get("models", []):
        name = m.get("name", "?")
        size = m.get("size", 0)
        modified = m.get("modified_at", "")
        models["ollama"].append({"name": name, "size_gb": round(size / 1e9, 1), "modified": modified[:10]})
    models["count"] = len(d.get("models", []))
# Was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit); any
# failure here (Ollama down, empty/invalid JSON) just degrades to AMBER.
except Exception:
    models["status"] = "AMBER"

# Free/cheap GPU & inference resources — probed for reachability below.
gpu_free = [
    {"name": "Google Colab", "url": "https://colab.research.google.com", "type": "notebook"},
    {"name": "Kaggle", "url": "https://www.kaggle.com", "type": "notebook"},
    {"name": "HuggingFace Spaces", "url": "https://huggingface.co/spaces", "type": "inference"},
    {"name": "Replicate", "url": "https://replicate.com", "type": "api"},
    {"name": "Together.ai", "url": "https://api.together.xyz", "type": "api"},
    {"name": "Cerebras", "url": "https://api.cerebras.ai", "type": "api"},
    {"name": "Groq", "url": "https://api.groq.com", "type": "api"},
    {"name": "SambaNova", "url": "https://api.sambanova.ai", "type": "api"},
]
for gpu in gpu_free:
    code = cmd(f"curl -sf -o /dev/null -w '%{{http_code}}' '{gpu['url']}' --max-time 5")
    # 403 still proves the endpoint is alive (just auth-gated).
    gpu["reachable"] = code in ["200", "301", "302", "403"]
    models["gpu_free"].append(gpu)

# Fine-tuning dataset check: does the HF dataset still exist?
hf_ds = cmd("curl -sf 'https://huggingface.co/api/datasets/yace222/weval-training-data' --max-time 5 | head -c 50")
models["hf_dataset"] = "exists" if "yace222" in hf_ds or len(hf_ds) > 10 else "check"

lg(f" Models: {models.get('count', 0)} Ollama | {sum(1 for g in models['gpu_free'] if g.get('reachable'))} GPU free reachable")
results["agents"]["model_updater"] = models
# ═══ SAVE ═══
# Persist the aggregated results for the dashboard API.
# BUGFIX: json.dump(results, open(STATUS, "w"), ...) leaked the file handle
# and relied on GC to flush/close it; use a context manager instead.
with open(STATUS, "w") as f:
    json.dump(results, f, indent=2)
lg(f"STATUS SAVED: {STATUS}")
lg("=" * 50)