initial: import DOLPHIN baseline 2026-04-21 from dolphinng5_predict working tree
Includes core prod + GREEN/BLUE subsystems:
- prod/ (BLUE harness, configs, scripts, docs)
- nautilus_dolphin/ (GREEN Nautilus-native impl + dvae/ preserved)
- adaptive_exit/ (AEM engine + models/bucket_assignments.pkl)
- Observability/ (EsoF advisor, TUI, dashboards)
- external_factors/ (EsoF producer)
- mc_forewarning_qlabs_fork/ (MC regime/envelope)

Excludes runtime caches, logs, backups, and reproducible artifacts per .gitignore.
This commit is contained in:
131
prod/exf_prefect_production.py
Executable file
131
prod/exf_prefect_production.py
Executable file
@@ -0,0 +1,131 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
DOLPHIN ExF Production Flow
|
||||
============================
|
||||
External Factors fetcher deployed under Prefect.
|
||||
"""
|
||||
|
||||
from prefect import flow, task, get_run_logger
|
||||
from datetime import datetime, timezone
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Make sibling modules importable: prepend this script's directory and the
# neighbouring external_factors package.  Both go in at index 0, so
# external_factors ends up at the head of sys.path.
_HERE = Path(__file__).parent
for _extra in (_HERE, _HERE.parent / "external_factors"):
    sys.path.insert(0, str(_extra))
|
||||
# Hazelcast destination: the latest snapshot is stored as one JSON blob
# under HZ_KEY in the HZ_MAP map.
HZ_KEY = "exf_latest"
HZ_MAP = "DOLPHIN_FEATURES"
# Indicators that must all be present (and non-NaN) for the feed to be
# flagged "_acb_ready" by the production loop.
ACB_KEYS = ["funding_btc", "funding_eth", "dvol_btc", "dvol_eth",
            "fng", "vix", "ls_btc", "taker", "oi_btc"]


def hz_push_blocking(client, payload: dict) -> bool:
    """Push indicators to Hazelcast (blocking, not a Prefect task).

    A ``_pushed_at`` UTC timestamp is stamped onto a *copy* of ``payload``
    (the caller's dict is never mutated), which is then JSON-serialized and
    stored under ``HZ_KEY`` in the ``HZ_MAP`` map.

    Args:
        client: connected Hazelcast client exposing ``get_map``.
        payload: JSON-serializable indicator mapping.

    Returns:
        True on success, False on any failure.  Best-effort by design: a
        push error must never take down the production loop, so it is
        reported via the return value instead of an exception.
    """
    import json
    try:
        payload = dict(payload)  # defensive copy; don't mutate caller's dict
        payload["_pushed_at"] = datetime.now(timezone.utc).isoformat()
        client.get_map(HZ_MAP).blocking().put(HZ_KEY, json.dumps(payload))
        return True
    except Exception:
        # Deliberately broad: any client/serialization failure maps to False;
        # the caller decides whether/how to log it.
        return False
||||
@flow(name="exf-production", log_prints=True)
def exf_production_flow(warmup_s: int = 25):
    """
    Production ExF fetcher - runs continuously under Prefect.

    Starts the real-time fetcher, waits ``warmup_s`` seconds for sources to
    warm up, then loops at ~0.5s intervals: fetch indicators, annotate ACB
    readiness, push the snapshot to Hazelcast, and persist it to disk.
    Runs until interrupted; services are always torn down in ``finally``.

    Args:
        warmup_s: seconds to wait after starting RealTimeExFService
            before entering the production loop.
    """
    # Imported lazily so the module can be loaded (e.g. by Prefect's
    # deployment machinery) without these services' dependencies installed.
    from realtime_exf_service import RealTimeExFService
    from exf_persistence import ExFPersistenceService
    from _hz_push import make_hz_client
    import time

    log = get_run_logger()

    # --- Service startup -------------------------------------------------
    log.info("Starting RealTimeExFService...")
    svc = RealTimeExFService()
    svc.start()
    log.info(f"Warmup: {warmup_s}s")
    time.sleep(warmup_s)

    log.info("Starting ExFPersistenceService...")
    persist = ExFPersistenceService(flush_interval_s=300)  # flush every 5 min
    persist.start()

    log.info("Connecting to Hazelcast...")
    client = make_hz_client()
    log.info("Hazelcast connected")

    pushes = 0
    last_status = 0  # epoch seconds of last status log; 0 forces one on first pass

    log.info("=== EXF PRODUCTION LOOP STARTED ===")

    try:
        while True:
            t0 = time.monotonic()

            # Fetch indicators; staleness metadata rides in under "_staleness"
            indicators = svc.get_indicators(dual_sample=True)
            staleness = indicators.pop("_staleness", {})

            # Build payload: scalar values only, plus per-key staleness (s)
            payload = {k: v for k, v in indicators.items()
                       if isinstance(v, (int, float, str, bool))}
            payload["_staleness_s"] = {k: round(v, 1)
                                       for k, v in staleness.items()
                                       if isinstance(v, (int, float))}

            # ACB status: a key counts as present only when it holds a real,
            # non-NaN number (v == v filters NaN).
            acb_present = [k for k in ACB_KEYS
                           if payload.get(k) is not None
                           and isinstance(payload[k], (int, float))
                           and payload[k] == payload[k]]

            payload["_acb_ready"] = len(acb_present) == len(ACB_KEYS)
            payload["_acb_present"] = f"{len(acb_present)}/{len(ACB_KEYS)}"
            payload["_acb_missing"] = [k for k in ACB_KEYS if k not in acb_present]
            payload["_ok_count"] = len([k for k in payload.keys() if not k.startswith('_')])
            payload["_timestamp"] = datetime.now(timezone.utc).isoformat()

            # Push to Hazelcast (direct call, not as Prefect task - avoids
            # serialization issues).  hz_push_blocking reports failure via
            # its return value and never raises, so check it explicitly.
            # BUGFIX: previously pushes was incremented unconditionally and
            # the surrounding except branch was unreachable dead code.
            if hz_push_blocking(client, payload):
                pushes += 1
            else:
                log.warning("HZ push failed")

            # Persist to disk
            persist.update_snapshot(payload)

            # Log status every 60 seconds
            if time.time() - last_status > 60:
                stats = persist.get_stats()
                log.info(
                    f"Status: pushes={pushes}, "
                    f"acb={payload['_acb_present']}, "
                    f"ready={payload['_acb_ready']}, "
                    f"files={stats.get('files_written', 0)}"
                )
                last_status = time.time()

            # Maintain 0.5s interval; monotonic clock is immune to wall-clock
            # jumps, and time.sleep(0.0) is a harmless no-op.
            elapsed = time.monotonic() - t0
            time.sleep(max(0.0, 0.5 - elapsed))

    except KeyboardInterrupt:
        log.info("Shutdown requested")
    finally:
        # Always tear down services, even on unexpected exceptions.
        log.info("Stopping services...")
        svc.stop()
        persist.stop()
        client.shutdown()
        log.info(f"Total pushes: {pushes}")


if __name__ == "__main__":
    exf_production_flow()
|
||||
Reference in New Issue
Block a user