Files
DOLPHIN/prod/clean_arch/adapters/hazelcast_feed.py
hjnormey 01c19662cb initial: import DOLPHIN baseline 2026-04-21 from dolphinng5_predict working tree
Includes core prod + GREEN/BLUE subsystems:
- prod/ (BLUE harness, configs, scripts, docs)
- nautilus_dolphin/ (GREEN Nautilus-native impl + dvae/ preserved)
- adaptive_exit/ (AEM engine + models/bucket_assignments.pkl)
- Observability/ (EsoF advisor, TUI, dashboards)
- external_factors/ (EsoF producer)
- mc_forewarning_qlabs_fork/ (MC regime/envelope)

Excludes runtime caches, logs, backups, and reproducible artifacts per .gitignore.
2026-04-21 16:58:38 +02:00

176 lines
6.0 KiB
Python
Executable File

#!/usr/bin/env python3
"""
ADAPTER: HazelcastDataFeed
==========================
Implementation of DataFeedPort using Hazelcast.
Current implementation - uses DolphinNG6 data feed.
All data (price + eigenvalues) from single source, same timestamp.
"""
import json
import logging
import time
from datetime import datetime
from pathlib import Path
from typing import Optional, Callable, Dict, Any

# Port interface (project-local; requires the parent directory on sys.path)
import sys
sys.path.insert(0, str(Path(__file__).parent.parent))
from ports.data_feed import DataFeedPort, MarketSnapshot, ACBUpdate
# Module-level logger for this adapter (explicit name rather than __name__).
logger = logging.getLogger("HazelcastDataFeed")
class HazelcastDataFeed(DataFeedPort):
    """
    ADAPTER: Hazelcast implementation of DataFeedPort.

    Reads from DolphinNG6 output via Hazelcast maps:
    - DOLPHIN_FEATURES: Price + eigenvalues (ALWAYS SYNCED)
    - DOLPHIN_SAFETY: Posture/mode
    - DOLPHIN_STATE_*: Portfolio state

    No sync issues - all data written atomically by DolphinNG6.
    """

    def __init__(self, config: Dict[str, Any]):
        """Store configuration; no connection is made until connect().

        Args:
            config: Adapter config dict; the optional 'hazelcast' sub-dict
                may carry 'cluster' and 'host' keys.
        """
        self.config = config
        self.hz_client = None          # hazelcast.HazelcastClient after connect()
        self.features_map = None       # blocking proxy for DOLPHIN_FEATURES
        self.safety_map = None         # blocking proxy for DOLPHIN_SAFETY
        self._last_snapshot: Optional[MarketSnapshot] = None  # fallback cache
        self._latency_ms: float = 0.0  # last measured snapshot-read latency

    async def connect(self) -> bool:
        """Connect to Hazelcast cluster.

        Returns:
            True on success, False on any failure (logged, never raised).
        """
        try:
            # Imported lazily so the adapter can be constructed (and the
            # module imported) without the hazelcast package installed.
            import hazelcast

            hz_config = self.config.get('hazelcast', {})
            cluster = hz_config.get('cluster', 'dolphin')
            host = hz_config.get('host', 'localhost:5701')
            logger.info(f"Connecting to Hazelcast: {host} (cluster: {cluster})")
            self.hz_client = hazelcast.HazelcastClient(
                cluster_name=cluster,
                cluster_members=[host],
            )
            # Get references to the maps (blocking proxies expose a sync API).
            self.features_map = self.hz_client.get_map('DOLPHIN_FEATURES').blocking()
            self.safety_map = self.hz_client.get_map('DOLPHIN_SAFETY').blocking()
            # Cheap map operation doubles as a connectivity test.
            size = self.features_map.size()
            logger.info(f"[✓] Connected. Features map: {size} entries")
            return True
        except Exception as e:
            logger.error(f"[✗] Connection failed: {e}")
            return False

    async def disconnect(self):
        """Clean disconnect: shut down the client if one was created."""
        if self.hz_client:
            self.hz_client.shutdown()
            logger.info("[✓] Disconnected from Hazelcast")

    async def get_latest_snapshot(self, symbol: str = "BTCUSDT") -> Optional[MarketSnapshot]:
        """
        Get latest synchronized snapshot from Hazelcast.

        Reads 'latest_eigen_scan' which contains:
        - asset_prices[]: Array of prices for all assets
        - asset_loadings[]: Computed eigenvalues
        - assets[]: Asset symbols
        - scan_number: Sequence number
        - timestamp: Unix timestamp

        All fields from SAME 5s pulse - GUARANTEED SYNCED.

        Args:
            symbol: Asset symbol to extract from the scan.

        Returns:
            The fresh snapshot, the cached snapshot for the SAME symbol if the
            read fails or the map is empty, or None otherwise.
        """
        try:
            # perf_counter is monotonic, so the latency measurement cannot go
            # negative on wall-clock adjustments (old code subtracted utcnow()).
            start = time.perf_counter()
            raw = self.features_map.get("latest_eigen_scan")
            if not raw:
                return self._cached_for(symbol)
            data = json.loads(raw)
            # Find index for requested symbol.
            assets = data.get('assets', [])
            if symbol not in assets:
                logger.warning(f"Symbol {symbol} not in assets list: {assets[:5]}...")
                return None
            idx = assets.index(symbol)
            prices = data.get('asset_prices', [])  # Note: field is asset_prices, not prices
            eigenvalues = data.get('asset_loadings', [])  # Note: field is asset_loadings
            # Build snapshot.
            snapshot = MarketSnapshot(
                # NOTE(review): read time, not pulse time — consider parsing
                # data['timestamp'] instead. utcnow() kept (naive datetime) so
                # downstream comparisons against naive times keep working.
                timestamp=datetime.utcnow(),
                symbol=symbol,
                # Guard against a prices array shorter than the assets array.
                price=float(prices[idx]) if idx < len(prices) else 0.0,
                eigenvalues=[float(e) for e in eigenvalues] if eigenvalues else [],
                velocity_divergence=data.get('vel_div'),
                irp_alignment=data.get('irp_alignment'),
                scan_number=data.get('scan_number'),
                source="hazelcast"
            )
            self._last_snapshot = snapshot
            # Latency of the whole read+parse+build path, in milliseconds.
            self._latency_ms = (time.perf_counter() - start) * 1000.0
            return snapshot
        except Exception as e:
            logger.error(f"Error reading snapshot: {e}")
            return self._cached_for(symbol)

    def _cached_for(self, symbol: str) -> Optional[MarketSnapshot]:
        """Return the cached snapshot only if it matches the requested symbol.

        BUGFIX: previously the cache was returned regardless of symbol, so a
        caller asking for one symbol could receive a stale snapshot of another.
        """
        if self._last_snapshot is not None and self._last_snapshot.symbol == symbol:
            return self._last_snapshot
        return None

    async def subscribe_snapshots(self, callback: Callable[[MarketSnapshot], None]):
        """
        Subscribe to snapshot updates via polling (listener not critical).
        Polling every 5s matches DolphinNG6 pulse.

        NOTE(review): intentionally a no-op — the callback is never invoked
        here; callers are expected to poll get_latest_snapshot().
        """
        logger.info("[✓] Snapshot subscription ready (polling mode)")

    async def get_acb_update(self) -> Optional[ACBUpdate]:
        """Get ACB update from Hazelcast, or None if absent or unreadable."""
        try:
            # ACB might be in features or separate map
            raw = self.features_map.get("latest_acb")
            if raw:
                data = json.loads(raw)
                return ACBUpdate(
                    timestamp=datetime.utcnow(),
                    boost=data.get('boost', 1.0),
                    beta=data.get('beta', 0.5),
                    cut=data.get('cut', 0.0),
                    posture=data.get('posture', 'APEX')
                )
        except Exception as e:
            logger.error(f"ACB read error: {e}")
        return None

    def get_latency_ms(self) -> float:
        """Return last measured snapshot-read latency in milliseconds."""
        return self._latency_ms

    def health_check(self) -> bool:
        """Check Hazelcast connection health with a cheap map operation."""
        # Guard both handles: previously a None features_map raised an
        # AttributeError that the except clause silently absorbed.
        if not self.hz_client or self.features_map is None:
            return False
        try:
            # Quick ping
            self.features_map.size()
            return True
        except Exception:  # was a bare except: would swallow KeyboardInterrupt
            return False