Includes core prod + GREEN/BLUE subsystems: - prod/ (BLUE harness, configs, scripts, docs) - nautilus_dolphin/ (GREEN Nautilus-native impl + dvae/ preserved) - adaptive_exit/ (AEM engine + models/bucket_assignments.pkl) - Observability/ (EsoF advisor, TUI, dashboards) - external_factors/ (EsoF producer) - mc_forewarning_qlabs_fork/ (MC regime/envelope) Excludes runtime caches, logs, backups, and reproducible artifacts per .gitignore.
235 lines
6.8 KiB
Python
Executable File
#!/usr/bin/env python3
|
|
"""
|
|
EXF Integration Test
|
|
====================
|
|
Quick validation of the complete ExF pipeline:
|
|
1. RealTimeExFService fetching
|
|
2. Hazelcast push
|
|
3. Persistence to disk
|
|
4. Integrity monitoring
|
|
|
|
Usage:
|
|
python test_exf_integration.py [--duration 30]
|
|
"""
|
|
|
|
import sys
|
|
import time
|
|
import json
|
|
import argparse
|
|
import logging
|
|
from pathlib import Path
|
|
|
|
# Setup paths
|
|
sys.path.insert(0, str(Path(__file__).parent))
|
|
|
|
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s')
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
def test_service_only(duration_s: int = 30):
    """Test RealTimeExFService alone (no HZ/persistence).

    Starts the service, waits 15s for warm-up, samples one dual-sampled
    indicator snapshot, then lets the service run for *duration_s* more
    seconds before stopping it.

    Args:
        duration_s: Additional run time (seconds) after the warm-up.

    Returns:
        True always — the stage reports its findings via logging and is
        treated as informational rather than pass/fail.
    """
    from realtime_exf_service import RealTimeExFService

    logger.info("=" * 60)
    logger.info("TEST 1: RealTimeExFService Only")
    logger.info("=" * 60)

    svc = RealTimeExFService()
    svc.start()
    # FIX: original used an f-string with no placeholders (ruff F541).
    logger.info("Service started, warming up for 15s...")
    time.sleep(15)

    # One dual-sampled snapshot plus overall service status.
    indicators = svc.get_indicators(dual_sample=True)
    status = svc.status()

    # Keys prefixed with '_' are metadata, not indicator values.
    logger.info("Indicators fetched: %d",
                len([k for k in indicators if not k.startswith('_')]))
    logger.info("Status: %s/%s OK",
                status['indicators_ok'], status['indicators_total'])
    logger.info("ACB indicators: %s/%s",
                status['acb_indicators_ok'], status['acb_indicators_total'])

    # Show sample values for a few well-known indicators, if present.
    for key in ['basis', 'spread', 'funding_btc', 'vix']:
        if key in indicators:
            logger.info("  %s: %s", key, indicators[key])

    # Run for remaining duration.
    logger.info("Running for %ss...", duration_s)
    time.sleep(duration_s)

    status = svc.status()
    logger.info("Final: %s/%s OK",
                status['indicators_ok'], status['indicators_total'])

    svc.stop()
    logger.info("Test 1 PASSED")
    return True
|
|
|
|
|
|
def test_with_persistence(duration_s: int = 60):
    """Test service + persistence.

    Brings up the indicator service and the persistence service, feeds
    snapshots into persistence on a 5-second cadence for roughly
    *duration_s* seconds, and passes when at least one file was written.
    """
    from realtime_exf_service import RealTimeExFService
    from exf_persistence import ExFPersistenceService

    banner = "=" * 60
    logger.info(banner)
    logger.info("TEST 2: Service + Persistence")
    logger.info(banner)

    # Bring up both services.
    svc = RealTimeExFService()
    svc.start()

    persist = ExFPersistenceService(flush_interval_s=10)  # Fast flush for test
    persist.start()

    logger.info("Services started, warming up...")
    time.sleep(15)

    # Push a snapshot into persistence every 5 seconds.
    for idx in range(duration_s // 5):
        snapshot = svc.get_indicators(dual_sample=True)
        persist.update_snapshot(snapshot)

        stats = persist.get_stats()
        sufficiency = persist.check_data_sufficiency()

        logger.info(f"Iteration {idx+1}: files={stats['files_written']}, "
                    f"sufficiency={sufficiency['score']:.2f}, "
                    f"sufficient={sufficiency['sufficient']}")

        time.sleep(5)

    # Final stats.
    stats = persist.get_stats()
    logger.info(f"Final stats: {json.dumps(stats, indent=2, default=str)}")

    # Cleanup.
    persist.stop()
    svc.stop()

    # At least one persisted file is the pass criterion.
    if stats['files_written'] <= 0:
        logger.error("Test 2 FAILED: No files written")
        return False
    logger.info("Test 2 PASSED")
    return True
|
|
|
|
|
|
def test_full_pipeline(duration_s: int = 60):
    """Test full pipeline with Hazelcast (requires running HZ).

    Wires RealTimeExFService -> Hazelcast push -> persistence ->
    integrity monitor, runs for roughly *duration_s* seconds, then
    verifies at least one successful push and one persisted file.

    Args:
        duration_s: Approximate pipeline run time in seconds.

    Returns:
        True on success, or when Hazelcast is unavailable (treated as a
        skip, not a failure); False when no pushes/files were produced.
    """
    from realtime_exf_service import RealTimeExFService
    from exf_persistence import ExFPersistenceService
    from exf_integrity_monitor import ExFIntegrityMonitor
    from _hz_push import make_hz_client, hz_push

    logger.info("=" * 60)
    logger.info("TEST 3: Full Pipeline (Service + HZ + Persist + Monitor)")
    logger.info("=" * 60)

    # Connect Hazelcast first; without it this whole stage is a no-op skip.
    try:
        client = make_hz_client()
        logger.info("Hazelcast connected")
    except Exception as e:
        logger.error(f"Hazelcast connection failed: {e}")
        logger.info("Skipping Test 3 (HZ not available)")
        return True  # Not a failure, just no HZ

    # Start services
    svc = RealTimeExFService()
    svc.start()

    persist = ExFPersistenceService(flush_interval_s=15)
    persist.start()

    def get_indicators():
        # Indicator-source callback handed to the integrity monitor.
        return svc.get_indicators(dual_sample=True)

    monitor = ExFIntegrityMonitor(
        hz_client=client,
        persistence_service=persist,
        indicator_source=get_indicators,
        check_interval_s=10
    )
    monitor.start()

    pushes = 0
    try:
        logger.info("All services started, warming up...")
        time.sleep(15)

        # Run pipeline: push -> persist -> health-check every ~3s.
        for i in range(duration_s // 3):
            indicators = svc.get_indicators(dual_sample=True)

            # Push to HZ
            if hz_push("exf_test", indicators, client):
                pushes += 1

            # Update persistence
            persist.update_snapshot(indicators)

            # Check monitor
            health = monitor.get_health_status()
            if health:
                logger.info(f"Push {pushes}: health={health.overall}, "
                            f"indicators={health.indicators_present}, "
                            f"acb_ready={health.acb_ready}")

            time.sleep(3)

        # Final stats
        stats = persist.get_stats()
        logger.info(f"Persistence: {stats['files_written']} files written")

        alerts = monitor.get_recent_alerts(n=5)
        if alerts:
            logger.info(f"Recent alerts: {len(alerts)}")
            for a in alerts:
                logger.info(f"  [{a.severity}] {a.message}")
    finally:
        # ROBUSTNESS FIX: always tear down the monitor, persistence,
        # service, and HZ client even if the pipeline loop raises —
        # the original leaked all four on any exception.
        monitor.stop()
        persist.stop()
        svc.stop()
        client.shutdown()

    if pushes > 0 and stats['files_written'] > 0:
        logger.info("Test 3 PASSED")
        return True
    logger.error(f"Test 3 FAILED: pushes={pushes}, files={stats['files_written']}")
    return False
|
|
|
|
|
|
def main():
    """Parse CLI arguments, run the selected test stage(s), and exit.

    Exit status is 0 when every executed stage passed, 1 otherwise.
    """
    parser = argparse.ArgumentParser(description="ExF Integration Test")
    parser.add_argument("--duration", type=int, default=30, help="Test duration per stage")
    parser.add_argument("--test", choices=["1", "2", "3", "all"], default="all", help="Which test to run")
    args = parser.parse_args()

    results = []

    # Stage id -> (label, runner); executed in order when "all" is chosen.
    stages = [
        ("1", "Service Only", test_service_only),
        ("2", "With Persistence", test_with_persistence),
        ("3", "Full Pipeline", test_full_pipeline),
    ]
    for stage_id, label, runner in stages:
        if args.test in (stage_id, "all"):
            results.append((label, runner(args.duration)))

    # Summary
    banner = "=" * 60
    logger.info(banner)
    logger.info("TEST SUMMARY")
    logger.info(banner)
    for name, passed in results:
        logger.info(f"  {name}: {'PASS' if passed else 'FAIL'}")

    sys.exit(0 if all(passed for _, passed in results) else 1)


if __name__ == "__main__":
    main()
|