initial: import DOLPHIN baseline 2026-04-21 from dolphinng5_predict working tree
Includes core prod + GREEN/BLUE subsystems: - prod/ (BLUE harness, configs, scripts, docs) - nautilus_dolphin/ (GREEN Nautilus-native impl + dvae/ preserved) - adaptive_exit/ (AEM engine + models/bucket_assignments.pkl) - Observability/ (EsoF advisor, TUI, dashboards) - external_factors/ (EsoF producer) - mc_forewarning_qlabs_fork/ (MC regime/envelope) Excludes runtime caches, logs, backups, and reproducible artifacts per .gitignore.
This commit is contained in:
151
prod/esof_update_flow.py
Executable file
151
prod/esof_update_flow.py
Executable file
@@ -0,0 +1,151 @@
|
||||
"""DOLPHIN — EsoF (Esoteric Factors) Live Daemon
|
||||
================================================
|
||||
Long-running process that wraps EsotericFactorsService and streams its
|
||||
5-second state snapshot to Hazelcast every `HZ_PUSH_INTERVAL_S` seconds.
|
||||
|
||||
Architecture (from ExF_EsoF_Complete_Specification.md):
|
||||
- Local astropy / math calculations — NO external APIs, NO rate limits
|
||||
- EsotericFactorsService polls at 5s; <1ms retrieval via get_latest()
|
||||
- 6-hour TTL cache for expensive astro computations (moon, mercury)
|
||||
- HZ key: DOLPHIN_FEATURES['esof_latest']
|
||||
- Local JSON cache: external_factors/eso_cache/latest_esoteric_factors.json
|
||||
(atomic tmp→rename write; legacy fallback for disk-readers)
|
||||
|
||||
Indicators pushed:
|
||||
moon_illumination, moon_phase_name, mercury_retrograde,
|
||||
population_weighted_hour, liquidity_weighted_hour, liquidity_session,
|
||||
market_cycle_position, fibonacci_time, calendar, regional_times,
|
||||
timestamp, unix
|
||||
|
||||
Usage:
|
||||
python prod/esof_update_flow.py # run live
|
||||
python prod/esof_update_flow.py --poll 5 # override poll interval
|
||||
"""
|
||||
|
||||
import sys
|
||||
import json
|
||||
import logging
|
||||
import signal
|
||||
import time
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from datetime import datetime, timezone
|
||||
|
||||
# Repo root: this file lives in prod/, so two .parent hops up.
HCM_DIR = Path(__file__).parent.parent
# Make project-local packages importable when this file runs as a script:
# external_factors/ provides esoteric_factors_service; HCM_DIR provides prod/.
sys.path.insert(0, str(HCM_DIR / "external_factors"))
sys.path.insert(0, str(HCM_DIR))

# Daemon-wide logging: INFO to stderr with timestamps and logger names.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s — %(message)s",
)
logger = logging.getLogger("esof_daemon")

# ── Constants ──────────────────────────────────────────────────────────────────
HZ_PUSH_INTERVAL_S = 5  # push to HZ every 5 s (≤ scan resolution)
POLL_INTERVAL_S = 5  # EsotericFactorsService internal poll
WARMUP_S = 6  # wait for first compute cycle before reading state
HZ_KEY = "esof_latest"  # key inside the DOLPHIN_FEATURES HZ map
# Legacy on-disk fallback for readers that do not use Hazelcast.
LOCAL_CACHE_DIR = HCM_DIR / "external_factors" / "eso_cache"
LOCAL_CACHE_FILE = LOCAL_CACHE_DIR / "latest_esoteric_factors.json"
|
||||
|
||||
|
||||
def _write_local_cache(data: dict) -> None:
    """Atomically persist *data* to the local JSON cache (legacy fallback).

    Writes to a temp file in the same directory, then renames it over the
    target so disk-readers never observe a partially-written file.  Failures
    are logged and swallowed deliberately: the Hazelcast push is the primary
    delivery path and must not be interrupted by local-disk issues.

    Args:
        data: JSON-serializable snapshot from EsotericFactorsService.
    """
    # parents=True: survive a fresh checkout where eso_cache/ (or its
    # parent) does not exist yet — exist_ok alone raises FileNotFoundError.
    LOCAL_CACHE_DIR.mkdir(parents=True, exist_ok=True)
    tmp = LOCAL_CACHE_DIR / "latest_esoteric_factors.tmp"
    try:
        with open(tmp, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2)
        # Same-directory rename: atomic on POSIX filesystems.
        tmp.replace(LOCAL_CACHE_FILE)
    except Exception as e:
        logger.warning(f"Local cache write failed: {e}")
|
||||
|
||||
|
||||
# ── Main daemon ────────────────────────────────────────────────────────────────
|
||||
|
||||
def run(poll_interval_s: float = POLL_INTERVAL_S):
    """Run the EsoF daemon loop until SIGINT/SIGTERM.

    Starts EsotericFactorsService, waits ``WARMUP_S`` seconds for its first
    compute cycle, then every ``HZ_PUSH_INTERVAL_S`` seconds reads the latest
    in-memory snapshot and (a) writes the local JSON cache and (b) pushes it
    to Hazelcast under ``HZ_KEY``, reconnecting lazily on push failure.

    Args:
        poll_interval_s: internal poll interval forwarded to
            EsotericFactorsService (default ``POLL_INTERVAL_S``).
    """
    # Project-local imports — resolved via the sys.path entries set above.
    from esoteric_factors_service import EsotericFactorsService
    from prod._hz_push import make_hz_client, hz_push

    svc = EsotericFactorsService(poll_interval_s=poll_interval_s)
    svc.start()
    logger.info(f"EsotericFactorsService started (poll={poll_interval_s}s) — warmup {WARMUP_S}s …")
    time.sleep(WARMUP_S)

    # Hazelcast client, lazily (re)established; None means "not connected —
    # retry on the next push attempt".
    client = None

    def _connect():
        nonlocal client
        try:
            if client:
                # Best-effort teardown of the stale client before reconnecting.
                # NOTE: narrowed from bare `except:` — a bare except would also
                # swallow SystemExit/KeyboardInterrupt.
                try:
                    client.shutdown()
                except Exception:
                    pass
            client = make_hz_client()
            logger.info("Hazelcast connected")
        except Exception as e:
            logger.warning(f"HZ connect failed: {e}")
            client = None

    _connect()

    # Graceful shutdown: a one-element list acts as a mutable cell the signal
    # handler can flip and the main loop can observe.
    _running = [True]

    def _stop(signum, frame):
        logger.info("Shutdown signal — stopping EsoF daemon")
        _running[0] = False

    signal.signal(signal.SIGINT, _stop)
    signal.signal(signal.SIGTERM, _stop)

    push_count = 0
    fail_count = 0

    logger.info(f"EsoF daemon live — pushing to HZ['{HZ_KEY}'] every {HZ_PUSH_INTERVAL_S}s")

    while _running[0]:
        t0 = time.monotonic()

        # Non-blocking <1ms read from in-memory state
        data = svc.get_latest()

        if data:
            # Write local JSON cache (legacy fallback for disk-readers)
            _write_local_cache(data)

            # Push to HZ, reconnecting lazily if the client is down.
            if client is None:
                _connect()

            if client and hz_push(HZ_KEY, data, client):
                push_count += 1
                if push_count % 12 == 1:  # log roughly once a minute (12 × 5 s)
                    logger.info(
                        f"EsoF push#{push_count} "
                        f"moon={data.get('moon_phase_name')} "
                        f"retro={data.get('mercury_retrograde')} "
                        f"session={data.get('liquidity_session')} "
                        f"cycle={data.get('market_cycle_position')}"
                    )
            else:
                fail_count += 1
                logger.warning(f"HZ push failed (total fails={fail_count}) — reconnecting")
                _connect()
        else:
            logger.debug("EsoF state empty — service still warming up")

        # Sleep out the remainder of the push interval (never negative).
        elapsed = time.monotonic() - t0
        time.sleep(max(0.0, HZ_PUSH_INTERVAL_S - elapsed))

    # Orderly teardown after the loop exits.
    svc.stop()
    if client:
        try:
            client.shutdown()
        except Exception:
            pass
    logger.info(f"EsoF daemon stopped. pushes={push_count} fails={fail_count}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: optionally override the service poll interval.
    cli = argparse.ArgumentParser(description="DOLPHIN EsoF live daemon")
    cli.add_argument(
        "--poll",
        type=float,
        default=POLL_INTERVAL_S,
        help=f"Internal poll interval for EsotericFactorsService (default {POLL_INTERVAL_S}s)",
    )
    ns = cli.parse_args()
    run(poll_interval_s=ns.poll)
|
||||
Reference in New Issue
Block a user