Files
weval-l99/pw-exhaustive-auto.py
2026-04-19 15:48:31 +02:00

185 lines
7.9 KiB
Python
Executable File
Raw Permalink Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
#!/usr/bin/env python3
"""
EXHAUSTIVE AUTO-DISCOVERY TEST · 18avr2026
Auto-discover all HTML + API publics · test in parallel
Target: maximum coverage · 6σ statistical
"""
import os, sys, time, json, subprocess
from pathlib import Path
from playwright.sync_api import sync_playwright
# Timestamp tag for this run; embedded in the output directory name.
TS = time.strftime("%Y%m%d-%H%M%S")
# Every artifact of this run (JSON report, screenshots, videos) lands in a
# fresh per-run folder under the public test-report tree.
OUT = Path("/var/www/html/test-report") / f"exhaustive-{TS}"
OUT.mkdir(parents=True, exist_ok=True)
# ═══════════════════════════════════════════════════════════════
# AUTO-DISCOVERY · scan filesystem for servable content
# ═══════════════════════════════════════════════════════════════
def discover_urls(root="/var/www/html"):
    """Auto-discover the URLs to smoke-test.

    Scans *root* for servable HTML (top level plus a capped set of key
    subdirectories), then appends a fixed list of GET-able API endpoints and
    subdomain roots.

    Args:
        root: Filesystem directory to scan for HTML files. Defaults to the
            production docroot; parameterized so the discovery logic can be
            exercised against any directory tree.

    Returns:
        list[tuple[str, str]]: ``(kind, path_or_url)`` pairs where *kind* is
        ``"html"``, ``"api"`` or ``"subdomain"``. HTML/API entries are paths
        relative to the site base; subdomain entries are absolute URLs.
    """
    root = Path(root)
    urls = []
    # Backup/snapshot suffixes that must never be served as test targets.
    skip_markers = (".bak", ".gold", ".pre-", ".orig")
    # HTML files at the docroot top level, in stable (sorted) order.
    for f in sorted(root.glob("*.html")):
        if any(x in f.name for x in skip_markers):
            continue
        urls.append(("html", f"/{f.name}"))
    # HTML in key subdirectories, capped at 15 files each to bound run time.
    for subdir in ["products", "wevia-ia", "ethica", "agents", "admin"]:
        p = root / subdir
        if p.is_dir():
            for f in sorted(p.glob("*.html"))[:15]:
                if any(x in f.name for x in [".bak", ".gold"]):
                    continue
                urls.append(("html", f"/{subdir}/{f.name}"))
    # Key API endpoints that respond to plain GET requests.
    api_endpoints = [
        "/api/stripe-live-bridge.php",
        "/api/release-check.php?window=60",
        "/api/dsh-predict-api.php",
        "/api/wevia-v69-dg-command-center.php",
        "/api/wevia-v71-intelligence-growth.php",
        "/api/wevia-v83-business-kpi.php?action=summary",
        "/api/wevia-v83-business-kpi.php?action=full",
        "/api/weval-technology-platform-api.php",
        "/api/nonreg-latest.json",
        "/api/architecture-scan.json",
        "/api/ecosystem-registry.php?q=wevia",
        "/api/ethica-stats.php",
        "/api/v83-business-kpi-latest.json",
        "/api/playwright-business-coverage-latest.json",
        "/api/crm-pipeline-live.php",
        "/api/oss-trending.json",
        "/api/source-of-truth.json",
        "/api/wave-wiring-queue.json",
        "/api/visual-management-live.php",
        "/api/kpi-history-30d.php",
        "/api/living-proof-api.php",
        "/api/wevia-tool-registry.json",
        "/api/cortex-report.json",
        "/api/ux-agent-report.json",
    ]
    for ep in api_endpoints:
        urls.append(("api", ep))
    # Subdomain roots, tested as absolute URLs.
    subdomains = [
        ("subdomain", "https://wevads.weval-consulting.com/"),
        ("subdomain", "https://git.weval-consulting.com/"),
        ("subdomain", "https://monitor.weval-consulting.com/"),
        ("subdomain", "https://paperclip.weval-consulting.com/"),
        ("subdomain", "https://analytics.weval-consulting.com/"),
        ("subdomain", "https://mm.weval-consulting.com/"),
        ("subdomain", "https://n8n.weval-consulting.com/"),
    ]
    urls.extend(subdomains)
    return urls
# Discovery runs once up front; the whole run is sized by this list.
discovered = discover_urls()
print(f"[discovery] {len(discovered)} URLs to test", flush=True)

# Report skeleton — per-URL entries are appended to "scenarios" as the run
# proceeds, and a "summary" section is filled in at the end.
results = dict(
    ts=TS,
    out=str(OUT),
    methodology="EXHAUSTIVE AUTO-DISCOVERY · 6σ target",
    total_scenarios=len(discovered),
    scenarios=[],
)

# Absolute base prepended to the relative html/api paths from discovery.
BASE = "https://weval-consulting.com"
# ── Main run: visit every discovered URL in one shared browser context ──
# Any fatal failure (e.g. the browser cannot launch) is captured into the
# report instead of crashing, so the JSON artifact is always written.
try:
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True, args=["--no-sandbox", "--disable-dev-shm-usage"])
        ctx = browser.new_context(
            viewport={"width": 1600, "height": 1000},
            record_video_dir=str(OUT),
            record_video_size={"width": 1600, "height": 1000},
            ignore_https_errors=True,
        )
        page = ctx.new_page()
        pass_count = 0; fail_count = 0; auth_gate_count = 0; err_count = 0; total_ms = 0
        for idx, (kind, path) in enumerate(discovered, 1):
            t0 = time.time()
            url = path if path.startswith("http") else f"{BASE}{path}"
            # Filesystem-safe slug used for screenshot filenames and logging.
            slug = f"{idx:03d}_{kind}_{path.replace('/','_').replace('?','_').replace('=','_').replace('.','_')[:50]}"
            entry = {"idx": idx, "kind": kind, "slug": slug, "url": url}
            try:
                resp = page.goto(url, wait_until="domcontentloaded", timeout=12000)
                entry["http"] = resp.status if resp else None
                time.sleep(0.8)  # brief settle time; kept short for the exhaustive run
                entry["title"] = page.title()[:60]
                entry["body_len"] = page.evaluate("document.body ? document.body.innerText.length : 0")
                entry["elapsed_ms"] = round((time.time() - t0) * 1000, 0)
                # Screenshot only every 5th scenario to save disk space.
                # (BUGFIX: the old comment said "every 10th" but the code
                # samples every 5th — comment corrected to match behavior.)
                if idx % 5 == 0:
                    page.screenshot(path=str(OUT / f"{slug[:60]}.png"), full_page=False)
                    entry["screenshot"] = True
                # Classification: HTTP 200 with a real body is PASS; a 200 with
                # a near-empty body is presumed to be an auth gate; other <400
                # statuses count as redirects (also expected); anything else fails.
                if entry["http"] == 200 and entry["body_len"] > 80:
                    entry["status"] = "PASS"
                    pass_count += 1
                elif entry["http"] == 200 and entry["body_len"] <= 80:
                    entry["status"] = "AUTH_GATE"
                    auth_gate_count += 1
                elif entry["http"] is not None and entry["http"] < 400:
                    entry["status"] = "REDIRECT"
                    auth_gate_count += 1
                else:
                    entry["status"] = "FAIL"
                    fail_count += 1
                total_ms += entry["elapsed_ms"]
                if idx % 5 == 0:
                    print(f" [{idx}/{len(discovered)}] {slug[:40]:42s} {entry['status']:10s} {entry['http']} · {entry['body_len']}b · {entry['elapsed_ms']}ms", flush=True)
            except Exception as e:
                # Navigation/evaluation failures are recorded, never fatal.
                entry["error"] = str(e)[:100]
                entry["status"] = "ERROR"
                err_count += 1
                if idx % 5 == 0:
                    print(f" [{idx}/{len(discovered)}] ERR: {e}", flush=True)
            results["scenarios"].append(entry)
        ctx.close()
        browser.close()
    # Six Sigma calc (AUTH_GATE/REDIRECT are expected, so not counted as defects).
    opp = len(discovered)
    defects = fail_count + err_count  # not auth_gate
    dpmo = round((defects / opp) * 1_000_000) if opp else 0
    if dpmo <= 3.4: sigma = "6σ (world-class)"
    elif dpmo <= 230: sigma = "5σ"
    elif dpmo <= 6210: sigma = "4σ"
    elif dpmo <= 66807: sigma = "3σ"
    else: sigma = "<3σ"
    results["summary"] = {
        "pass": pass_count,
        "auth_gate_expected": auth_gate_count,
        "fail": fail_count,
        "error": err_count,
        "total": opp,
        "total_elapsed_ms": total_ms,
        "avg_ms_per_scenario": round(total_ms / opp, 0) if opp else 0,
        "defects_per_million_opportunities": dpmo,
        "sigma_level": sigma,
        # BUGFIX: guard opp == 0 (empty discovery) like the other ratios above;
        # previously these two raised ZeroDivisionError and lost the summary.
        "pass_rate_pct": round((pass_count / opp) * 100, 2) if opp else 0,
        "operational_rate_pct": round(((pass_count + auth_gate_count) / opp) * 100, 2) if opp else 0,
    }
    print(f"\n[EXHAUSTIVE] {pass_count} PASS · {auth_gate_count} AUTH · {fail_count} FAIL · {err_count} ERR / {opp}", flush=True)
    print(f"[6σ] DPMO={dpmo} · {sigma} · pass_rate={results['summary']['pass_rate_pct']}% · operational={results['summary']['operational_rate_pct']}%", flush=True)
except Exception as e:
    results["fatal_error"] = str(e)
    print(f"[FATAL] {e}", flush=True)
# Persist the full report next to the captured media (always runs, even after
# a fatal error, so the run leaves an artifact behind).
(OUT / "exhaustive-results.json").write_text(json.dumps(results, indent=2))

# Summarize the recorded artifacts for the console log.
videos = list(OUT.glob("*.webm"))
total_mb = sum(v.stat().st_size for v in videos) / (1024 * 1024)
print(f"[VIDEO] {len(videos)} files · {total_mb:.1f} MB")
screenshots = list(OUT.glob("*.png"))
print(f"[SCREENSHOTS] {len(screenshots)} files")
print(f"[OUT] {OUT}")