# --- Scraped file-listing metadata (not part of the script) ---
# Files
# weval-l99/test-enterprise-full.py
# 2026-04-13 12:43:21 +02:00
# 197 lines
# 7.7 KiB
# Python
# Executable File
#!/usr/bin/env python3
"""
Enterprise-Model Full Test — Playwright
Video: 45s recording for cinematic agent movement
Tests: all departments, pipelines, bubbles, multi-pipeline, buttons
Output: video + screenshots + JSON report
"""
import json
import os
import time

from playwright.sync_api import sync_playwright

# Accumulated run report; serialized to JSON at the end of the script.
RESULTS = {"tests": [], "errors": [], "video": None, "screenshots": []}

# Output locations for the video recording and the screenshots.
VID_DIR = "/opt/weval-l99/screenshots/videos"
SS_DIR = "/opt/weval-l99/screenshots/current"
os.makedirs(VID_DIR, exist_ok=True)
os.makedirs(SS_DIR, exist_ok=True)
def test(name, status, detail=""):
    """Record one test result in the global RESULTS and echo it to stdout.

    status is expected to be "PASS" or "FAIL"; any other value is shown
    as WARN in the console output.
    """
    RESULTS["tests"].append({"name": name, "status": status, "detail": detail})
    if status == "PASS":
        icon = "PASS"
    elif status == "FAIL":
        icon = "FAIL"
    else:
        icon = "WARN"
    print(f" [{icon}] {name}: {detail}")
with sync_playwright() as pw:
    # Headless Chromium with video recording enabled on the context; the
    # recording runs for the lifetime of the page (~45s of scripted activity).
    browser = pw.chromium.launch(headless=True, args=["--no-sandbox"])
    ctx = browser.new_context(
        viewport={"width": 1440, "height": 900},
        ignore_https_errors=True,
        record_video_dir=VID_DIR,
        record_video_size={"width": 1440, "height": 900}
    )
    page = ctx.new_page()

    # Capture JS console errors and uncaught page exceptions for test 10.
    js_errors = []
    page.on("console", lambda m: js_errors.append(m.text) if m.type == "error" else None)
    page.on("pageerror", lambda e: js_errors.append(str(e)))

    print("=== ENTERPRISE-MODEL FULL TEST ===")
    print("Loading page...")
    page.goto("https://weval-consulting.com/enterprise-model.html", timeout=20000, wait_until="domcontentloaded")
    time.sleep(3)  # give the canvas simulation time to initialize

    # === TEST 1: Page loads, canvas renders ===
    # AG (agents) and DP (departments) are globals defined by the page's JS;
    # -1 signals that the page script failed to load them.
    canvas_w = page.evaluate("document.getElementById('c')?.width") or 0
    ag_count = page.evaluate("typeof AG !== 'undefined' ? AG.length : -1")
    dp_count = page.evaluate("typeof DP !== 'undefined' ? DP.length : -1")
    test("canvas-render", "PASS" if canvas_w > 500 else "FAIL", f"canvas={canvas_w}px")
    test("agents-loaded", "PASS" if ag_count > 600 else "FAIL", f"AG={ag_count}")
    test("departments", "PASS" if dp_count >= 20 else "FAIL", f"DP={dp_count}")

    # Screenshot: initial state
    page.screenshot(path=f"{SS_DIR}/em-initial.png")
    RESULTS["screenshots"].append("em-initial.png")

    # === TEST 2: All departments have agents ===
    # a.rm appears to be an agent's home-department id — TODO confirm.
    dept_counts = page.evaluate("""() => {
        var counts = {};
        DP.forEach(function(d) {
            counts[d.id] = AG.filter(function(a) { return a.rm === d.id; }).length;
        });
        return counts;
    }""")
    empty_depts = [k for k, v in dept_counts.items() if v == 0]
    test("dept-agents", "PASS" if not empty_depts else "WARN",
         f"{len(dept_counts)} depts, empty={empty_depts or 'none'}")

    # === TEST 3: Pipeline phases exist for all departments ===
    pipeline_check = page.evaluate("""() => {
        var results = [];
        DP.forEach(function(d) {
            if (d.pp && d.pp.length > 0) {
                results.push({id: d.id, phases: d.pp.length, names: d.pp.join(',')});
            }
        });
        return results;
    }""")
    test("pipeline-phases", "PASS" if len(pipeline_check) >= 10 else "FAIL",
         f"{len(pipeline_check)} depts with pipelines")

    # === TEST 4: Corridor routing function exists ===
    has_corridor = page.evaluate("typeof corridorX === 'function'")
    test("corridor-routing", "PASS" if has_corridor else "FAIL",
         f"corridorX={has_corridor}")

    # === TEST 5: Trigger agents and watch movement ===
    print("\nTriggering agents for movement test (45s recording)...")
    # Named (agent, action) triggers; the page's trig() kicks one agent off.
    triggers = [
        ("L99 PILOT", "NonReg scan"),
        ("Ethica", "HCP scrape"),
        ("Writer", "Cold email"),
        ("Architect", "Cloud design"),
        ("DeerFlow", "Deep research"),
        ("Debugger", "Fix API"),
        ("QA", "Test run"),
    ]
    for name, action in triggers:
        # trig() returns a value we don't need; guard in case it is undefined.
        page.evaluate(f"typeof trig === 'function' ? trig('{name}', '{action}') : false")
    # Also trigger one random agent per department via trigD().
    page.evaluate("""() => {
        ['dev','con','sec','ops','sal','qa','pha','mta','saas','cron','intg','ai'].forEach(function(d) {
            if (typeof trigD === 'function') trigD(d, 'Auto test');
        });
    }""")
    time.sleep(2)

    # Check walking agents (si !== 'sit' means the agent left its seat).
    walking = page.evaluate("AG.filter(function(a){return a.si!=='sit';}).length")
    test("agents-walking", "PASS" if walking > 5 else "FAIL", f"{walking} agents en mouvement")

    # Screenshot: agents in motion
    page.screenshot(path=f"{SS_DIR}/em-movement.png")
    RESULTS["screenshots"].append("em-movement.png")

    # === TEST 6: Multi-pipeline crossing (check if any agent targets different dept) ===
    # Heuristic: an agent more than 100px away from its home row (rows assumed
    # 60px tall — TODO confirm against page layout) is counted as crossing.
    cross_pipeline = page.evaluate("""() => {
        var cross = 0;
        AG.forEach(function(a) {
            if (a.si !== 'sit' && a.cy) {
                var homeDi = DP.findIndex(function(d) { return d.id === a.rm; });
                if (homeDi >= 0) {
                    var homeY = 0;
                    for (var j = 0; j <= homeDi; j++) homeY += 60;
                    if (Math.abs(a.cy - homeY) > 100) cross++;
                }
            }
        });
        return cross;
    }""")
    # Bug fix: the original condition was `>= 0`, which is always true for a
    # count, so the WARN branch was unreachable. Warn when no crossing is seen.
    test("multi-pipeline", "PASS" if cross_pipeline > 0 else "WARN",
         f"{cross_pipeline} cross-pipeline agents")

    # === WAIT FOR CINEMATIC RECORDING (45 seconds) ===
    print("\nRecording cinematic movement (45s)...")
    for i in range(9):
        time.sleep(5)
        # Trigger a random department every 5 seconds to keep agents moving.
        page.evaluate("""() => {
            var depts = ['dev','con','sec','ops','sal','qa','pha','mta','l99','cron'];
            var d = depts[Math.floor(Math.random() * depts.length)];
            if (typeof trigD === 'function') trigD(d, 'Pipeline task');
        }""")
        walking_now = page.evaluate("AG.filter(function(a){return a.si!=='sit';}).length")
        print(f" t={i*5+5}s: {walking_now} agents walking")

    # Final screenshot
    page.screenshot(path=f"{SS_DIR}/em-final.png")
    RESULTS["screenshots"].append("em-final.png")

    # === TEST 7: Click on an agent (test bubble) ===
    # Click in the center of the first department to hit an agent
    page.mouse.click(200, 150)
    time.sleep(1)
    page.screenshot(path=f"{SS_DIR}/em-click.png")
    RESULTS["screenshots"].append("em-click.png")

    # === TEST 8: HUD displays correctly ===
    hud_text = page.evaluate("document.getElementById('st')?.textContent") or ""
    test("hud-display", "PASS" if "LIVE" in hud_text else "FAIL", f"HUD={hud_text[:60]}")

    # === TEST 9: Navigation buttons work ===
    nav_buttons = page.evaluate("document.querySelectorAll('#wnav a').length")
    test("nav-buttons", "PASS" if nav_buttons >= 5 else "FAIL", f"{nav_buttons} nav buttons")

    # === TEST 10: JS errors ===
    test("js-errors", "PASS" if len(js_errors) == 0 else "FAIL",
         f"{len(js_errors)} errors" + (f": {js_errors[0][:80]}" if js_errors else ""))
    # Bug fix: RESULTS["errors"] was initialized but never populated; persist
    # the captured console/page errors into the report.
    RESULTS["errors"] = js_errors

    # Close page/context first so Playwright finalizes the video file.
    page.close()
    ctx.close()
    browser.close()
# === Summary ===
total = len(RESULTS["tests"])
passed = sum(1 for t in RESULTS["tests"] if t["status"] == "PASS")
failed = sum(1 for t in RESULTS["tests"] if t["status"] == "FAIL")
print(f"\n=== RESULTS: {passed}/{total} PASS, {failed} FAIL ===")
RESULTS["summary"] = {"total": total, "pass": passed, "fail": failed}

# Bug fix: locate the recorded video BEFORE writing the report. Previously
# RESULTS["video"] was assigned after json.dump(), so the saved JSON always
# contained "video": null.
videos = [f for f in os.listdir(VID_DIR) if f.endswith(".webm")]
if videos:
    # Lexicographic sort is a best-effort "latest" pick — TODO: sort by mtime
    # if multiple recordings can accumulate in VID_DIR.
    latest = sorted(videos)[-1]
    size = os.path.getsize(os.path.join(VID_DIR, latest))
    print(f"Video: {latest} ({size//1024}KB)")
    RESULTS["video"] = latest

# Save report (served at /api/l99-enterprise-test.json)
with open("/var/www/html/api/l99-enterprise-test.json", "w") as f:
    json.dump(RESULTS, f, indent=2)
print("Report: /api/l99-enterprise-test.json")