131 lines
5.8 KiB
Python
131 lines
5.8 KiB
Python
#!/usr/bin/env python3
"""WEVIA AGENT SCANNER v1.0 — Deep Architecture Scanner + Wiki Updater

Scans: servers, Docker, APIs, routes, skills, crons, Qdrant, providers, pages, OSS

Updates wiki automatically. Cron */2h."""

import glob
import json
import os
import ssl
import subprocess
import urllib.request
from datetime import datetime

# SECURITY NOTE(review): this disables TLS certificate verification globally
# for urllib in this process. Tolerable only because every request below
# targets 127.0.0.1/localhost — do not reuse this module for external HTTPS.
ssl._create_default_https_context = ssl._create_unverified_context

# Directory where per-topic wiki JSON entries are written (one file per key).
WIKI_DIR = "/opt/weval-l99/wiki"
# Full scan report consumed by the web API.
REPORT = "/var/www/html/api/agent-scanner-report.json"
# Mattermost incoming-webhook URL (not referenced in this file; kept for
# compatibility in case other tooling imports it — TODO confirm).
MM_HOOK = "http://localhost:8065/hooks/pt54hzthf3b6pe6rgp1ionipnh"

os.makedirs(WIKI_DIR, exist_ok=True)
|
|
|
|
def log(m):
    """Print message *m* prefixed with the current HH:MM:SS timestamp."""
    stamp = datetime.now().strftime("%H:%M:%S")
    print(f"[{stamp}] {m}")
|
|
def sh(c):
    """Run shell command *c* and return its stripped stdout.

    Returns "" if the command exceeds the 10 s timeout instead of raising
    subprocess.TimeoutExpired, so a single hung command cannot abort the
    whole scan (the rest of the script treats "" as "no data").
    """
    try:
        res = subprocess.run(c, shell=True, capture_output=True, text=True, timeout=10)
    except subprocess.TimeoutExpired:
        return ""
    return res.stdout.strip()
|
|
def wiki(key, data):
    """Persist one wiki entry as WIKI_DIR/<key>.json with an update timestamp."""
    entry = {
        "key": key,
        "updated": datetime.now().isoformat(),
        "data": data,
    }
    path = f"{WIKI_DIR}/{key}.json"
    with open(path, "w") as f:
        json.dump(entry, f, ensure_ascii=False, indent=2)
|
|
|
|
def main():
    """Run all 12 scan sections, write per-topic wiki entries, save REPORT.

    Every section is best-effort: network-backed sections (Qdrant, Ollama)
    degrade to the string "unavailable" rather than failing the scan.
    """
    log("=== AGENT SCANNER v1.0 ===")
    report = {"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M"), "sections": {}}

    # 1. SERVERS — basic host health (container count, disk, RAM, load, uptime).
    log("1. Servers")
    s204 = {
        "docker": int(sh("docker ps | tail -n+2 | wc -l") or 0),
        "disk": sh("df -h / | tail -1 | awk '{print $5}'"),
        "ram": sh("free -h | grep Mem | awk '{print $3\"/\"$2}'"),
        "load": sh("cat /proc/loadavg | head -c 5"),
        "uptime": sh("uptime -p"),
    }
    report["sections"]["servers"] = {"S204": s204}
    wiki("SERVERS", s204)

    # 2. DOCKER CONTAINERS — name/status/ports for each running container.
    log("2. Docker")
    containers = []
    for line in sh("docker ps --format '{{.Names}}|{{.Status}}|{{.Ports}}'").split("\n"):
        if line:
            parts = line.split("|")
            containers.append({
                "name": parts[0],
                "status": parts[1] if len(parts) > 1 else "?",
                "ports": parts[2] if len(parts) > 2 else "",
            })
    report["sections"]["docker"] = {"count": len(containers), "containers": containers}
    wiki("DOCKER", {"count": len(containers), "names": [c["name"] for c in containers]})

    # 3. APIS — PHP endpoint files matching the two project prefixes.
    log("3. APIs")
    apis = [os.path.basename(f) for f in
            glob.glob("/var/www/html/api/wevia-*.php") + glob.glob("/var/www/html/api/weval-*.php")]
    report["sections"]["apis"] = {"count": len(apis), "files": apis}
    wiki("APIS", {"count": len(apis), "list": apis})

    # 4. ROUTES — line count and "// Route" marker count in the fast router.
    log("4. Routes")
    fast = "/var/www/html/api/weval-ia-fast.php"
    if os.path.exists(fast):
        # Single pass with a context manager: the original opened the file
        # twice and never closed either handle.
        lines = 0
        routes = 0
        with open(fast) as f:
            for l in f:
                lines += 1
                if "// Route" in l:
                    routes += 1
        report["sections"]["routes"] = {"lines": lines, "count": routes}
        wiki("ROUTES", {"lines": lines, "count": routes})

    # 5. SKILLS — one directory per skill.
    log("5. Skills")
    # rstrip("/") because glob("…*/") yields paths with a trailing slash,
    # for which os.path.basename() returns "" (the names were all empty).
    skills = [os.path.basename(d.rstrip("/")) for d in glob.glob("/opt/deer-flow/skills/weval/*/")]
    report["sections"]["skills"] = {"count": len(skills)}
    wiki("SKILLS", {"count": len(skills), "latest": skills[-5:] if skills else []})

    # 6. CRONS — project cron fragments in /etc/cron.d.
    log("6. Crons")
    crons = glob.glob("/etc/cron.d/weval-*")
    cron_names = [os.path.basename(c) for c in crons]
    report["sections"]["crons"] = {"count": len(crons), "files": cron_names}
    wiki("CRONS", {"count": len(crons), "list": cron_names})

    # 7. QDRANT — collection list plus per-collection point counts.
    log("7. Qdrant")
    try:
        r = urllib.request.urlopen("http://127.0.0.1:6333/collections", timeout=5).read()
        d = json.loads(r)
        colls = {}
        for c in d.get("result", {}).get("collections", []):
            name = c.get("name", "?")
            try:
                r2 = urllib.request.urlopen(f"http://127.0.0.1:6333/collections/{name}", timeout=3).read()
                d2 = json.loads(r2)
                colls[name] = d2.get("result", {}).get("points_count", 0)
            except Exception:
                # Per-collection failure: record "?" and keep scanning.
                colls[name] = "?"
        total = sum(v for v in colls.values() if isinstance(v, int))
        report["sections"]["qdrant"] = {"total": total, "collections": colls}
        wiki("QDRANT", {"total": total, "collections": colls})
    except Exception:
        report["sections"]["qdrant"] = "unavailable"

    # 8. OLLAMA — installed model names.
    log("8. Ollama")
    try:
        r = urllib.request.urlopen("http://127.0.0.1:11434/api/tags", timeout=5).read()
        d = json.loads(r)
        models = [m.get("name", "?") for m in d.get("models", [])]
        report["sections"]["ollama"] = {"count": len(models), "models": models}
        wiki("OLLAMA", {"count": len(models), "models": models})
    except Exception:
        report["sections"]["ollama"] = "unavailable"

    # 9. HTML PAGES
    log("9. Pages")
    pages = [os.path.basename(f) for f in glob.glob("/var/www/html/*.html")]
    report["sections"]["pages"] = {"count": len(pages)}
    wiki("PAGES", {"count": len(pages)})

    # 10. OPT TOOLS — top-level directories under /opt.
    log("10. /opt tools")
    # Same trailing-slash fix as section 5.
    tools = [os.path.basename(d.rstrip("/")) for d in glob.glob("/opt/*/") if os.path.isdir(d)]
    report["sections"]["opt_tools"] = {"count": len(tools)}
    wiki("OPT_TOOLS", {"count": len(tools), "list": tools})

    # 11. DATASET — training-pair count from the merged JSONL.
    log("11. Dataset")
    ds_file = "/opt/wevia-brain/training-data/weval-merged-dataset.jsonl"
    ds_count = int(sh(f"wc -l {ds_file} | awk '{{print $1}}'") or 0) if os.path.exists(ds_file) else 0
    report["sections"]["dataset"] = {"pairs": ds_count}
    wiki("DATASET", {"pairs": ds_count})

    # 12. WIKI STATS — counted after all sections above have written entries.
    log("12. Wiki")
    wiki_files = glob.glob(f"{WIKI_DIR}/*.json")
    report["sections"]["wiki"] = {"entries": len(wiki_files)}

    # SAVE
    with open(REPORT, "w") as f:
        json.dump(report, f, indent=2, ensure_ascii=False)
    log(f"Report saved: {REPORT}")
    log(f"Wiki updated: {len(wiki_files)} entries")

    # Summary — .get() everywhere because a section may be missing or the
    # string "unavailable" never reaches these five keys.
    sections = report["sections"]
    log(f"DONE: Docker={sections.get('docker',{}).get('count','?')} Routes={sections.get('routes',{}).get('count','?')} Skills={sections.get('skills',{}).get('count','?')} Crons={sections.get('crons',{}).get('count','?')} Wiki={sections.get('wiki',{}).get('entries','?')}")
|
|
|
|
if __name__ == "__main__":
    # Top-level boundary for the cron job: print the crash plus a full
    # traceback so failures are visible in the cron mail/log. The guard
    # also keeps an `import` of this module from triggering a full scan.
    try:
        main()
    except Exception as e:
        print(f"CRASH: {e}")
        import traceback
        traceback.print_exc()
|