217 lines
8.0 KiB
Python
217 lines
8.0 KiB
Python
|
|
#!/usr/bin/env python3
|
||
|
|
"""
|
||
|
|
DOLPHIN — ExF (External Factors) Live Daemon v2.1 (Simplified)
|
||
|
|
===============================================================
|
||
|
|
Long-running process WITHOUT Prefect decoration for stability.
|
||
|
|
Direct implementation with persistence and monitoring.
|
||
|
|
|
||
|
|
Usage:
|
||
|
|
python exf_fetcher_simple.py # run with persistence
|
||
|
|
python exf_fetcher_simple.py --no-persist # run without persistence
|
||
|
|
"""
|
||
|
|
|
||
|
|
import sys
|
||
|
|
import json
|
||
|
|
import logging
|
||
|
|
import signal
|
||
|
|
import time
|
||
|
|
import argparse
|
||
|
|
from pathlib import Path
|
||
|
|
from datetime import datetime, timezone
|
||
|
|
|
||
|
|
# Setup paths: make the sibling service modules importable regardless of the
# working directory the daemon is launched from.
SCRIPT_DIR = Path(__file__).parent
sys.path.insert(0, str(SCRIPT_DIR))
sys.path.insert(0, str(SCRIPT_DIR.parent / "external_factors"))
sys.path.insert(0, str(SCRIPT_DIR.parent / "extf_docs"))

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s — %(message)s",
)
logger = logging.getLogger("exf_fetcher")

# ── Constants ──────────────────────────────────────────────────────────────────
HZ_PUSH_INTERVAL_S = 0.5          # seconds between pushes to Hazelcast
WARMUP_S = 30                     # default seconds to wait before the first push
HZ_KEY = "exf_latest"             # map key the JSON payload is stored under
HZ_MAP_NAME = "DOLPHIN_FEATURES"  # Hazelcast map the daemon publishes into

# Keys that must all carry a real (non-None, non-NaN) numeric value before the
# payload is flagged "_acb_ready". NOTE(review): presumably "ACB" names a
# downstream consumer of these features — confirm the acronym.
ACB_KEYS = frozenset([
    "funding_btc", "funding_eth", "dvol_btc", "dvol_eth",
    "fng", "vix", "ls_btc", "taker", "oi_btc",
])

# Full indicator universe; used for the payload's "_expected_count" field and
# the startup log line.
ALL_EXPECTED_INDICATORS = [
    'funding_btc', 'funding_eth', 'oi_btc', 'oi_eth', 'ls_btc', 'ls_eth',
    'ls_top', 'taker', 'basis', 'imbal_btc', 'imbal_eth', 'spread',
    'dvol_btc', 'dvol_eth', 'fund_dbt_btc', 'fund_dbt_eth',
    'vix', 'dxy', 'us10y', 'sp500', 'fedfunds', 'fng', 'hashrate', 'tvl',
    'liq_vol_24h', 'liq_long_ratio', 'liq_z_score', 'liq_percentile',
]
|
||
|
|
|
||
|
|
# ── Hazelcast Helper ───────────────────────────────────────────────────────────
|
||
|
|
|
||
|
|
def make_hz_client():
    """Build and return a Hazelcast client connected to the local cluster.

    Import is deferred to call time so the module can load even when the
    hazelcast package is absent.
    """
    import hazelcast

    settings = {
        "cluster_name": "dolphin",
        "cluster_members": ["localhost:5701"],
        "connection_timeout": 5.0,
    }
    return hazelcast.HazelcastClient(**settings)
|
||
|
|
|
||
|
|
def hz_push(client, key: str, data: dict) -> bool:
    """Serialize *data* (with a push timestamp added) into the HZ feature map.

    A shallow copy of *data* is stamped with "_pushed_at" (UTC ISO-8601),
    JSON-encoded, and stored under *key*. Returns True on success; any
    failure is logged at WARNING level and reported as False so the caller
    can retry/reconnect.
    """
    try:
        record = dict(data)
        record["_pushed_at"] = datetime.now(timezone.utc).isoformat()
        feature_map = client.get_map(HZ_MAP_NAME).blocking()
        feature_map.put(key, json.dumps(record))
    except Exception as e:
        logger.warning(f"HZ push failed: {e}")
        return False
    return True
|
||
|
|
|
||
|
|
# ── Main Loop ──────────────────────────────────────────────────────────────────
|
||
|
|
|
||
|
|
def main_loop(warmup_s: int = WARMUP_S, enable_persistence: bool = True):
    """Run the ExF fetch/publish daemon until interrupted or signalled.

    Starts RealTimeExFService, optionally ExFPersistenceService, and a
    Hazelcast client, then loops: sample indicators, build a JSON-scalar
    payload with ACB-readiness metadata, push it to HZ[HZ_KEY] every
    HZ_PUSH_INTERVAL_S seconds, and emit a status line at most once a minute.

    Args:
        warmup_s: seconds to sleep after the service starts, before the first
            push, so the indicator feeds have time to populate.
        enable_persistence: when True, snapshots are forwarded to
            ExFPersistenceService (best-effort: a startup failure is logged
            and the push loop runs without persistence).
    """

    # Import here to ensure fresh modules
    from realtime_exf_service import RealTimeExFService

    # Start service
    svc = RealTimeExFService()
    svc.start()
    logger.info(f"RealTimeExFService started — warmup {warmup_s}s")
    time.sleep(warmup_s)

    # Start persistence if enabled. Best-effort: failure here must not stop
    # the live push loop.
    persist_svc = None
    if enable_persistence:
        try:
            from exf_persistence import ExFPersistenceService
            persist_svc = ExFPersistenceService(flush_interval_s=300)
            persist_svc.start()
            logger.info("ExFPersistenceService started")
        except Exception as e:
            logger.warning(f"Persistence failed to start: {e}")

    # Connect Hazelcast. Unlike persistence, HZ is mandatory — without it
    # there is nowhere to publish, so abort the daemon.
    client = None
    try:
        client = make_hz_client()
        logger.info("Hazelcast connected")
    except Exception as e:
        logger.error(f"Hazelcast connection failed: {e}")
        return

    # State counters for the periodic status log.
    push_count = 0
    fail_count = 0
    last_status_log = 0  # epoch seconds; 0 forces a status line on the first pass

    logger.info(f"ExF loop live — pushing to HZ['{HZ_KEY}'] every {HZ_PUSH_INTERVAL_S}s")
    logger.info(f"ACB-critical: {len(ACB_KEYS)}, Total expected: {len(ALL_EXPECTED_INDICATORS)}")

    # Graceful shutdown: SIGINT/SIGTERM flip the flag; the while loop exits
    # at the top of its next iteration, falling through to the finally block.
    running = True

    def on_signal(signum, frame):
        nonlocal running
        logger.info(f"Received signal {signum}, shutting down...")
        running = False

    signal.signal(signal.SIGINT, on_signal)
    signal.signal(signal.SIGTERM, on_signal)

    try:
        while running:
            t0 = time.monotonic()

            # Get indicators. NOTE(review): assumes get_indicators returns a
            # flat dict of values plus a "_staleness" sub-dict of per-key ages
            # in seconds — confirm against RealTimeExFService.
            indicators = svc.get_indicators(dual_sample=True)
            staleness = indicators.pop("_staleness", {})

            # Build payload: keep only JSON-scalar values.
            payload = {
                k: v for k, v in indicators.items()
                if isinstance(v, (int, float, str, bool))
            }
            payload["_staleness_s"] = {k: round(v, 1) for k, v in staleness.items() if isinstance(v, (int, float))}

            # Check ACB readiness: every ACB-critical key must carry a real
            # numeric value.
            acb_present = set()
            for k in ACB_KEYS:
                val = payload.get(k)
                if val is not None and isinstance(val, (int, float)) and val == val:  # val == val filters NaN
                    acb_present.add(k)

            payload["_acb_ready"] = len(acb_present) == len(ACB_KEYS)
            payload["_acb_present"] = f"{len(acb_present)}/{len(ACB_KEYS)}"
            payload["_acb_missing"] = list(ACB_KEYS - acb_present) if len(acb_present) < len(ACB_KEYS) else []
            payload["_ok_count"] = len([k for k in payload.keys() if not k.startswith('_')])
            payload["_expected_count"] = len(ALL_EXPECTED_INDICATORS)
            payload["_timestamp"] = datetime.now(timezone.utc).isoformat()
            payload["_push_seq"] = push_count

            # Push to Hazelcast; on failure, log every 10th failure and
            # attempt a full reconnect (shutdown + fresh client).
            if hz_push(client, HZ_KEY, payload):
                push_count += 1
            else:
                fail_count += 1
                if fail_count % 10 == 1:
                    logger.warning(f"HZ push failed (fails={fail_count})")
                try:
                    client.shutdown()
                    client = make_hz_client()
                except:  # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit — consider `except Exception`
                    pass

            # Update persistence (off hot path)
            if persist_svc:
                try:
                    persist_svc.update_snapshot(payload)
                except Exception as e:
                    logger.debug(f"Persistence update error: {e}")

            # Periodic status log (at most once per 60s of wall-clock time).
            if time.time() - last_status_log > 60:
                st = svc.status()
                suff = persist_svc.check_data_sufficiency() if persist_svc else {'score': 'N/A'}

                logger.info(
                    f"Status | pushes={push_count} fails={fail_count} | "
                    f"indicators={st['indicators_ok']}/{st['indicators_total']} | "
                    f"acb={len(acb_present)}/{len(ACB_KEYS)} ready={payload['_acb_ready']} | "
                    f"sufficiency={suff.get('score', 'N/A')}"
                )

                if not payload['_acb_ready']:
                    logger.warning(f"ACB NOT READY — missing: {payload['_acb_missing']}")

                last_status_log = time.time()

            # Maintain interval: sleep whatever remains of the push period,
            # never a negative amount.
            elapsed = time.monotonic() - t0
            time.sleep(max(0.0, HZ_PUSH_INTERVAL_S - elapsed))

    except KeyboardInterrupt:
        logger.info("Interrupted")
    finally:
        # Teardown order: indicator service, then persistence, then HZ client.
        logger.info("Shutting down...")
        svc.stop()
        if persist_svc:
            persist_svc.stop()
        if client:
            client.shutdown()
        logger.info(f"Stopped. pushes={push_count} fails={fail_count}")
|
||
|
|
|
||
|
|
|
||
|
|
if __name__ == "__main__":
    # CLI entry point: parse options and hand off to the daemon loop.
    cli = argparse.ArgumentParser(description="DOLPHIN ExF Simple Daemon")
    cli.add_argument("--warmup", type=int, default=WARMUP_S)
    cli.add_argument("--no-persist", action="store_true")
    opts = cli.parse_args()

    main_loop(warmup_s=opts.warmup, enable_persistence=not opts.no_persist)
|