"""
|
||
|
|
prod/tests/conftest.py
|
||
|
|
======================
|
||
|
|
Pytest session hooks — after every test run, push results to the TUI footer
|
||
|
|
via write_test_results() and to run_logs/test_results_latest.json.
|
||
|
|
|
||
|
|
Usage:
|
||
|
|
python -m pytest prod/tests/test_data_integrity.py --category data_integrity
|
||
|
|
python -m pytest prod/tests/test_finance_fuzz.py --category finance_fuzz
|
||
|
|
...
|
||
|
|
|
||
|
|
If --category is omitted the file-name is used to auto-detect the category.
|
||
|
|
|
||
|
|
Category → file mapping
|
||
|
|
data_integrity : test_data_integrity.py
|
||
|
|
finance_fuzz : test_finance_fuzz.py, test_acb_hz_status_integrity.py,
|
||
|
|
test_acb_hz_integration.py, test_nautilus_event_trader.py
|
||
|
|
signal_fill : test_signal_to_fill.py, test_acb_hz_status_integrity.py,
|
||
|
|
test_acb_hz_integration.py, test_nautilus_event_trader.py
|
||
|
|
degradation : test_degradational.py, test_mhs_v3.py
|
||
|
|
actor : test_mhs_v3.py, test_scan_bridge_prefect_daemon.py
|
||
|
|
monte_carlo : test_mc_scenarios.py
|
||
|
|
"""
|
||
|
|
|
||
|
|
import json
|
||
|
|
import sys
|
||
|
|
from datetime import datetime, timezone
|
||
|
|
from pathlib import Path
|
||
|
|
|
||
|
|
_RESULTS_PATH = Path(__file__).parent.parent.parent / "run_logs" / "test_results_latest.json"
|
||
|
|
|
||
|
|
|
||
|
|
def _write_results(payload: dict) -> None:
    """Merge *payload* into test_results_latest.json and stamp ``_run_at``.

    Merges into the existing file so multiple categories accumulate
    correctly across separate pytest invocations.

    Args:
        payload: Mapping of category name -> result summary dict.
    """
    try:
        existing = json.loads(_RESULTS_PATH.read_text()) if _RESULTS_PATH.exists() else {}
    except (OSError, ValueError):
        # Unreadable or corrupt JSON (JSONDecodeError is a ValueError):
        # start fresh rather than abort the test session.
        existing = {}
    if not isinstance(existing, dict):
        # Guard against a non-object JSON root left by a previous writer.
        existing = {}
    existing["_run_at"] = datetime.now(timezone.utc).isoformat()
    existing.update(payload)
    # Bug fix: on a fresh checkout run_logs/ may not exist yet, and
    # write_text() would raise FileNotFoundError. Create it on demand.
    _RESULTS_PATH.parent.mkdir(parents=True, exist_ok=True)
    _RESULTS_PATH.write_text(json.dumps(existing, indent=2))
# ── Resolve write_test_results ──────────────────────────────────────────────
|
||
|
|
_TUI_DIR = Path(__file__).parent.parent.parent / "Observability" / "TUI"
|
||
|
|
sys.path.insert(0, str(_TUI_DIR))
|
||
|
|
try:
|
||
|
|
from dolphin_tui_v3 import write_test_results
|
||
|
|
_WTR_OK = True
|
||
|
|
except Exception:
|
||
|
|
_WTR_OK = False
|
||
|
|
|
||
|
|
# ── File → category map ─────────────────────────────────────────────────────
|
||
|
|
_FILE_CAT = {
|
||
|
|
"test_data_integrity": "data_integrity",
|
||
|
|
"test_finance_fuzz": "finance_fuzz",
|
||
|
|
"test_acb_hz_status_integrity": "finance_fuzz", # primary
|
||
|
|
"test_acb_hz_integration": "finance_fuzz",
|
||
|
|
"test_nautilus_event_trader": "signal_fill",
|
||
|
|
"test_signal_to_fill": "signal_fill",
|
||
|
|
"test_degradational": "degradation",
|
||
|
|
"test_mhs_v3": "degradation",
|
||
|
|
"test_scan_bridge_prefect_daemon": "actor",
|
||
|
|
"test_mc_scenarios": "monte_carlo",
|
||
|
|
}
|
||
|
|
|
||
|
|
_VALID_CATS = {"data_integrity", "finance_fuzz", "signal_fill", "degradation", "actor", "monte_carlo"}
|
||
|
|
|
||
|
|
|
||
|
|
def pytest_addoption(parser):
    """Register the --category CLI flag used to route this run's results."""
    help_text = "Override result category written to test_results_latest.json"
    parser.addoption("--category", default=None, help=help_text)
def _detect_category(session) -> str:
    """Infer category from collected item file paths.

    Returns the category of the first collected item whose file stem is
    known; falls back to "actor" when nothing matches.
    """
    known = (
        _FILE_CAT[stem]
        for stem in (Path(item.fspath).stem for item in session.items)
        if stem in _FILE_CAT
    )
    return next(known, "actor")
# ── Per-item outcome collector ───────────────────────────────────────────────
|
||
|
|
|
||
|
|
class _Collector:
|
||
|
|
def __init__(self):
|
||
|
|
self.passed = 0
|
||
|
|
self.failed = 0
|
||
|
|
self.skipped = 0
|
||
|
|
self.errors = []
|
||
|
|
|
||
|
|
_collector = _Collector()
|
||
|
|
|
||
|
|
|
||
|
|
def pytest_runtest_logreport(report):
    """Tally the outcome of each test's call phase.

    Pytest invokes this for setup / call / teardown of every item; only
    the actual test call is counted toward the session summary.
    """
    if report.when != "call":
        return
    # The three outcome flags are mutually exclusive, so branch order
    # does not matter.
    if report.skipped:
        _collector.skipped += 1
    elif report.failed:
        _collector.failed += 1
        if report.longreprtext:
            _collector.errors.append(report.nodeid)
    elif report.passed:
        _collector.passed += 1
def pytest_sessionfinish(session, exitstatus):
    """Push results after the session completes.

    Writes the category summary to test_results_latest.json, pushes it to
    the TUI footer when available, and echoes a one-line report to stderr.
    """
    category = session.config.getoption("--category", default=None) or _detect_category(session)
    if category not in _VALID_CATS:
        category = "actor"

    total = _collector.passed + _collector.failed
    if _collector.failed > 0:
        status = "FAIL"
    elif total > 0:
        status = "PASS"
    else:
        # Nothing ran in the call phase (e.g. everything skipped).
        status = "N/A"

    payload = {category: {"passed": _collector.passed, "total": total, "status": status}}

    # Always write JSON with _run_at — this is the M6 sensor source of truth.
    _write_results(payload)

    # Also push to TUI footer if available (best-effort, non-blocking).
    if _WTR_OK:
        try:
            write_test_results(payload)
        except Exception as e:
            print(f"[conftest] write_test_results failed: {e}", file=sys.stderr)

    print(
        f"\n[TEST REPORT] category={category} "
        f"passed={_collector.passed}/{total} "
        f"status={status}",
        file=sys.stderr,
    )