# initial: import DOLPHIN baseline 2026-04-21 from dolphinng5_predict working tree.
# Includes core prod + GREEN/BLUE subsystems: prod/ (BLUE harness, configs, scripts,
# docs); nautilus_dolphin/ (GREEN Nautilus-native impl + dvae/ preserved);
# adaptive_exit/ (AEM engine + models/bucket_assignments.pkl); Observability/
# (EsoF advisor, TUI, dashboards); external_factors/ (EsoF producer);
# mc_forewarning_qlabs_fork/ (MC regime/envelope). Excludes runtime caches, logs,
# backups, and reproducible artifacts per .gitignore.
# File: nautilus_dolphin/test_pf_ob_intelligence.py (executable, 587 lines)
"""OB Intelligence — 4-Subsystem Test Suite.
|
||||
|
||||
Phase 1: Data validation (CSVOBProvider)
|
||||
Phase 2: Feature computation (all 4 sub-systems)
|
||||
Phase 3: Signal quality (forward return correlation)
|
||||
Phase 4: Per-asset placement quality
|
||||
Phase 5: Market-wide detection
|
||||
Phase 6: Macro regime detection
|
||||
Phase 7: Engine integration (no regression vs baseline)
|
||||
"""
|
||||
import sys, time, math
|
||||
from pathlib import Path
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent))
|
||||
|
||||
OB_DATA_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\ob_data")
|
||||
VBT_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache")
|
||||
|
||||
# ============================================================================
# Phase 0: Compile numba kernels
# ============================================================================
print("=" * 70)
print(" OB INTELLIGENCE 4-SUBSYSTEM TEST SUITE")
print("=" * 70)
print("\nPhase 0: Compiling numba kernels...")
t0c = time.time()

from nautilus_dolphin.nautilus.alpha_asset_selector import compute_irp_nb, compute_ars_nb, rank_assets_irp_nb
from nautilus_dolphin.nautilus.alpha_bet_sizer import compute_sizing_nb
from nautilus_dolphin.nautilus.alpha_signal_generator import check_dc_nb
from nautilus_dolphin.nautilus.ob_features import (
    compute_imbalance_nb, compute_depth_1pct_nb, compute_depth_quality_nb,
    compute_fill_probability_nb, compute_spread_proxy_nb, compute_depth_asymmetry_nb,
    compute_imbalance_persistence_nb, compute_withdrawal_velocity_nb,
    compute_market_agreement_nb, compute_cascade_signal_nb,
    OBPlacementFeatures, OBSignalFeatures, OBMarketFeatures, OBMacroFeatures,
    NEUTRAL_PLACEMENT, NEUTRAL_SIGNAL, NEUTRAL_MARKET, NEUTRAL_MACRO,
    OBFeatureEngine,
)
from nautilus_dolphin.nautilus.ob_provider import CSVOBProvider, MockOBProvider
from nautilus_dolphin.nautilus.ob_placer import OBPlacer

# Warmup JIT: invoke each alpha kernel once with tiny inputs so compilation
# cost is paid here, not inside the timed phases below.
_p = np.array([1.0, 2.0, 3.0], dtype=np.float64)
compute_irp_nb(_p, -1)
compute_ars_nb(1.0, 0.5, 0.01)
rank_assets_irp_nb(np.ones((10, 2), dtype=np.float64), 8, -1, 5, 500.0, 20, 0.20)
compute_sizing_nb(-0.03, -0.02, -0.05, 3.0, 0.5, 5.0, 0.20, True, True, 0.0,
                  np.zeros(4, dtype=np.int64), np.zeros(4, dtype=np.int64),
                  np.zeros(5, dtype=np.float64), 0, -1, 0.01, 0.04)
check_dc_nb(_p, 3, 1, 0.75)

# Warmup OB kernels
_b = np.array([100.0, 200.0, 300.0, 400.0, 500.0], dtype=np.float64)
_a = np.array([110.0, 190.0, 310.0, 390.0, 510.0], dtype=np.float64)
compute_imbalance_nb(_b, _a)
compute_depth_1pct_nb(_b, _a)
compute_depth_quality_nb(210.0, 200.0)
compute_fill_probability_nb(1.0)
compute_spread_proxy_nb(_b, _a)
compute_depth_asymmetry_nb(_b, _a)
compute_imbalance_persistence_nb(np.array([0.1, -0.1, 0.2, 0.3], dtype=np.float64), 4)
compute_withdrawal_velocity_nb(np.array([100.0, 110.0, 105.0], dtype=np.float64), 2)
compute_market_agreement_nb(np.array([0.1, -0.05, 0.2, 0.15], dtype=np.float64), 4)
compute_cascade_signal_nb(np.array([-0.05, -0.15, 0.02, -0.12], dtype=np.float64), 4, -0.10)

print(f" JIT compile: {time.time() - t0c:.1f}s")

# Engine-level imports (loaded after the warmup timing print, order preserved).
from nautilus_dolphin.nautilus.alpha_orchestrator import NDAlphaEngine
from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker
# ============================================================================
# Phase 1: Data Validation
# ============================================================================
print(f"\n{'='*70}")
print(" PHASE 1: DATA VALIDATION (CSVOBProvider)")
print(f"{'='*70}")

csv_provider = CSVOBProvider(str(OB_DATA_DIR))
csv_assets = csv_provider.get_assets()
print(f"\n Available assets: {csv_assets}")

phase1_ok = True
for asset in csv_assets:
    ts = csv_provider.get_all_timestamps(asset)
    n = len(ts)
    # Touch the first and last snapshots as a retrieval smoke test.
    snap0 = csv_provider.get_snapshot(asset, ts[0]) if n > 0 else None
    snap_last = csv_provider.get_snapshot(asset, ts[-1]) if n > 0 else None

    # Integrity scan over a bounded sample of snapshots.
    num_nan = 0
    num_neg = 0
    for idx in range(min(n, 100)):  # sample first 100 only
        snap = csv_provider.get_snapshot(asset, ts[idx])
        if snap is None:
            continue
        if np.any(np.isnan(snap.bid_notional)) or np.any(np.isnan(snap.ask_notional)):
            num_nan += 1
        if np.any(snap.bid_notional < 0) or np.any(snap.ask_notional < 0):
            num_neg += 1

    # Timestamps appear to be epoch seconds (divided by 3600 for hours) —
    # TODO confirm against CSVOBProvider.
    t_range = (ts[-1] - ts[0]) / 3600 if n > 1 else 0
    print(f"\n {asset}:")
    print(f" Snapshots: {n}")
    print(f" Time range: {t_range:.1f} hours")
    if snap0:
        print(f" Bid depth (1%): ${snap0.bid_notional[0]:,.0f}")
        print(f" Ask depth (1%): ${snap0.ask_notional[0]:,.0f}")
        print(f" Total 5-level: ${sum(snap0.bid_notional) + sum(snap0.ask_notional):,.0f}")
    if num_nan > 0:
        print(f" WARNING: {num_nan} snapshots with NaN")
        phase1_ok = False
    if num_neg > 0:
        print(f" WARNING: {num_neg} snapshots with negative notional")
        phase1_ok = False
    if n == 0:
        print(f" WARNING: No data")
        phase1_ok = False

print(f"\n Phase 1: {'PASS' if phase1_ok else 'FAIL'}")
# ============================================================================
# Phase 2: Feature Computation (All 4 Sub-systems)
# ============================================================================
print(f"\n{'='*70}")
print(" PHASE 2: FEATURE COMPUTATION (4 SUB-SYSTEMS)")
print(f"{'='*70}")

t2 = time.time()
# Use CSVOBProvider for real data feature computation
ob_engine_csv = OBFeatureEngine(csv_provider)
if csv_assets:
    ob_engine_csv.preload_date("2025-01-15", csv_assets)
print(f"\n Preload time: {time.time() - t2:.2f}s")

phase2_ok = True
for asset in csv_assets:
    n = csv_provider.get_snapshot_count(asset)
    if n == 0:
        continue

    # Collect per-snapshot features for this asset.
    # FIX: removed the `imbalances_ma5` and `spreads` accumulators — they were
    # appended to on every snapshot but never read anywhere in this phase
    # (dead work; Phase 3 re-collects what it needs independently).
    imbalances = []
    persistences = []
    depth_qualities = []
    fill_probs = []

    for snap_idx in range(n):
        p = ob_engine_csv._preloaded_placement.get(asset, {}).get(snap_idx)
        s = ob_engine_csv._preloaded_signal.get(asset, {}).get(snap_idx)
        if p:
            depth_qualities.append(p.depth_quality)
            fill_probs.append(p.fill_probability)
        if s:
            imbalances.append(s.imbalance)
            persistences.append(s.imbalance_persistence)

    print(f"\n {asset} Feature Distributions:")
    if imbalances:
        imb = np.array(imbalances)
        print(f" Sub-2 Imbalance: mean={np.mean(imb):.4f} std={np.std(imb):.4f} "
              f"p5={np.percentile(imb,5):.4f} p50={np.percentile(imb,50):.4f} p95={np.percentile(imb,95):.4f}")
        # Validate ranges: imbalance is expected to be a normalized ratio in [-1, 1].
        if np.min(imb) < -1.0 or np.max(imb) > 1.0:
            print(f" FAIL: imbalance out of [-1, 1]")
            phase2_ok = False
    if persistences:
        per = np.array(persistences)
        print(f" Sub-2 Persistence: mean={np.mean(per):.4f} std={np.std(per):.4f} "
              f"p5={np.percentile(per,5):.4f} p50={np.percentile(per,50):.4f} p95={np.percentile(per,95):.4f}")
        if np.min(per) < 0 or np.max(per) > 1.0:
            print(f" FAIL: persistence out of [0, 1]")
            phase2_ok = False
    if depth_qualities:
        dq = np.array(depth_qualities)
        print(f" Sub-1 DepthQual: mean={np.mean(dq):.4f} std={np.std(dq):.4f} "
              f"p5={np.percentile(dq,5):.4f} p50={np.percentile(dq,50):.4f} p95={np.percentile(dq,95):.4f}")
        # Depth quality is only lower-bounded; no upper-range check.
        if np.min(dq) < 0:
            print(f" FAIL: depth_quality < 0")
            phase2_ok = False
    if fill_probs:
        fp = np.array(fill_probs)
        print(f" Sub-1 FillProb: mean={np.mean(fp):.4f} std={np.std(fp):.4f} "
              f"p5={np.percentile(fp,5):.4f} p50={np.percentile(fp,50):.4f} p95={np.percentile(fp,95):.4f}")
        if np.min(fp) < 0 or np.max(fp) > 1.0:
            print(f" FAIL: fill_probability out of [0, 1]")
            phase2_ok = False

# Market-wide features (Sub-3): indexed by snapshot of the first asset.
print(f"\n Sub-3 Market-Wide Features:")
mkt_med_imb = []
mkt_agree = []
n_ref = csv_provider.get_snapshot_count(csv_assets[0]) if csv_assets else 0
for snap_idx in range(n_ref):
    m = ob_engine_csv._preloaded_market.get(snap_idx)
    if m:
        mkt_med_imb.append(m.median_imbalance)
        mkt_agree.append(m.agreement_pct)

if mkt_med_imb:
    mi = np.array(mkt_med_imb)
    ag = np.array(mkt_agree)
    print(f" Median imbalance: mean={np.mean(mi):.4f} std={np.std(mi):.4f}")
    print(f" Agreement pct: mean={np.mean(ag):.4f} std={np.std(ag):.4f} "
          f"p5={np.percentile(ag,5):.4f} p95={np.percentile(ag,95):.4f}")
    high_agree = np.sum(ag > 0.8) / len(ag) * 100
    print(f" Snapshots with agreement > 0.8: {high_agree:.1f}%")
    if np.min(ag) < 0 or np.max(ag) > 1.0:
        print(f" FAIL: agreement out of [0, 1]")
        phase2_ok = False

# Sub-4 Macro: regime_signal is assumed to be -1/0/+1 (CALM/NEUTRAL/STRESS) —
# the +1 index below relies on that; TODO confirm against ob_features.
macro = ob_engine_csv.get_macro()
print(f"\n Sub-4 Macro Regime (final state):")
print(f" Depth velocity: {macro.depth_velocity:.4f}")
print(f" Cascade count: {macro.cascade_count}")
print(f" Acceleration: {macro.acceleration:.6f}")
print(f" Regime signal: {macro.regime_signal} ({['CALM','NEUTRAL','STRESS'][macro.regime_signal + 1]})")

print(f"\n Phase 2: {'PASS' if phase2_ok else 'FAIL'}")
# ============================================================================
# Phase 3: Signal Quality (OB Feature Correlation with Itself)
# ============================================================================
print(f"\n{'='*70}")
print(" PHASE 3: OB FEATURE INTERNAL CONSISTENCY")
print(f"{'='*70}")

# Check that imbalance and imbalance_ma5 are correlated (sanity check)
if csv_assets:
    # FIX: hoisted out of the `len(raw_imb) > 10` branch. Previously the
    # import only ran inside that branch, so if the first correlation check
    # was skipped but the depth-quality check below still ran, `stats` was
    # undefined and the script crashed with NameError.
    from scipy import stats

    asset = csv_assets[0]
    n = csv_provider.get_snapshot_count(asset)
    raw_imb = []
    ma5_imb = []
    for snap_idx in range(n):
        s = ob_engine_csv._preloaded_signal.get(asset, {}).get(snap_idx)
        if s:
            raw_imb.append(s.imbalance)
            ma5_imb.append(s.imbalance_ma5)
    if len(raw_imb) > 10:
        corr, pval = stats.spearmanr(raw_imb, ma5_imb)
        print(f"\n {asset}: imbalance vs imbalance_ma5 Spearman r={corr:.4f} (p={pval:.2e})")
        if corr < 0.5:
            print(f" WARNING: Low correlation between raw and smoothed imbalance")

    # Check fill_prob vs depth_quality monotonicity
    dqs = []
    fps = []
    for snap_idx in range(n):
        p = ob_engine_csv._preloaded_placement.get(asset, {}).get(snap_idx)
        if p:
            dqs.append(p.depth_quality)
            fps.append(p.fill_probability)
    if len(dqs) > 10:
        corr2, pval2 = stats.spearmanr(dqs, fps)
        print(f" {asset}: depth_quality vs fill_prob Spearman r={corr2:.4f} (p={pval2:.2e})")
        if corr2 < 0.9:
            print(f" WARNING: fill_prob should be monotonically related to depth_quality")

print(f"\n Phase 3: PASS (informational)")
# ============================================================================
# Phase 4: Per-Asset Placement Quality
# ============================================================================
print(f"\n{'='*70}")
print(" PHASE 4: PER-ASSET PLACEMENT QUALITY")
print(f"{'='*70}")

for asset in csv_assets:
    n = csv_provider.get_snapshot_count(asset)
    if n == 0:
        continue
    dqs = [ob_engine_csv._preloaded_placement.get(asset, {}).get(i) for i in range(n)]
    dqs = [d for d in dqs if d is not None]
    # FIX: guard the empty case — np.mean([]) emits a RuntimeWarning and
    # yields NaN, polluting the report for assets with no placement features.
    if not dqs:
        print(f"\n {asset}:")
        print(f" WARNING: no placement features available")
        continue
    mean_dq = np.mean([d.depth_quality for d in dqs])
    mean_fp = np.mean([d.fill_probability for d in dqs])
    mean_sp = np.mean([d.spread_proxy_bps for d in dqs])
    print(f"\n {asset}:")
    print(f" Mean depth quality: {mean_dq:.3f}")
    print(f" Mean fill prob: {mean_fp:.3f}")
    print(f" Mean spread proxy: {mean_sp:.3f} bps")

# SmartPlacer advice test: sample the middle snapshot of the first asset and
# sweep confidence levels for a fixed SHORT direction.
print(f"\n SmartPlacer Advice (sample):")
placer = OBPlacer()
if csv_assets:
    asset = csv_assets[0]
    n = csv_provider.get_snapshot_count(asset)
    mid = n // 2
    p = ob_engine_csv._preloaded_placement.get(asset, {}).get(mid, NEUTRAL_PLACEMENT)
    s = ob_engine_csv._preloaded_signal.get(asset, {}).get(mid, NEUTRAL_SIGNAL)
    for conf in [0.3, 0.5, 0.7, 0.9]:
        advice = placer.advise(p, s, signal_confidence=conf, direction=-1)
        print(f" conf={conf:.1f}: {advice.method:15s} offset={advice.offset_bps:.2f}bps "
              f"timeout={advice.timeout_s:.0f}s reason={advice.reason}")

print(f"\n Phase 4: PASS (informational)")
# ============================================================================
# Phase 5: Market-Wide Agreement Detection
# ============================================================================
print(f"\n{'='*70}")
print(" PHASE 5: MARKET-WIDE AGREEMENT DETECTION")
print(f"{'='*70}")

if mkt_agree:
    ag = np.array(mkt_agree)
    mi = np.array(mkt_med_imb)

    # Top 10 high-agreement moments (descending agreement order).
    sorted_idx = np.argsort(ag)[::-1]
    print(f"\n Top 10 high-agreement snapshots:")
    for idx in sorted_idx[:10]:
        print(f" snap {idx:5d}: agreement={ag[idx]:.3f} median_imb={mi[idx]:+.4f}")

    # Distribution of agreement across fixed thresholds.
    print(f"\n Agreement distribution:")
    for thresh in [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
        pct = np.sum(ag >= thresh) / len(ag) * 100
        print(f" >= {thresh:.1f}: {pct:5.1f}%")

print(f"\n Phase 5: PASS (informational)")
# ============================================================================
# Phase 6: Macro Regime Detection (Sub-4)
# ============================================================================
print(f"\n{'='*70}")
print(" PHASE 6: MACRO REGIME DETECTION")
print(f"{'='*70}")

# Report the final macro state produced by the real-data preload in Phase 2.
print(f"\n Final macro state:")
print(f" Depth velocity: {macro.depth_velocity:+.4f}")
print(f" Cascade count: {macro.cascade_count}")
print(f" Regime signal: {macro.regime_signal}")

# Simulate macro tracking with mock providers biased toward each regime.
# Falls back to a fixed pair of symbols when no CSV assets are available.
_mock_assets = csv_assets[:4] or ["BTCUSDT", "ETHUSDT"]
mock_stress = MockOBProvider(imbalance_bias=-0.3, depth_scale=0.3, assets=_mock_assets)
mock_calm = MockOBProvider(imbalance_bias=0.1, depth_scale=2.0, assets=_mock_assets)
mock_neutral = MockOBProvider(imbalance_bias=0.0, depth_scale=1.0, assets=_mock_assets)

for label, provider in [("STRESS", mock_stress), ("CALM", mock_calm), ("NEUTRAL", mock_neutral)]:
    engine = OBFeatureEngine(provider)
    engine.preload_date("test", provider.get_assets())
    m = engine.get_macro()
    print(f"\n Mock {label}: regime={m.regime_signal} velocity={m.depth_velocity:+.4f} cascade={m.cascade_count}")

print(f"\n Phase 6: PASS (informational)")
# ============================================================================
# Phase 7: Engine Integration (No Regression)
# ============================================================================
print(f"\n{'='*70}")
print(" PHASE 7: ENGINE INTEGRATION (BASELINE REGRESSION CHECK)")
print(f"{'='*70}")

# Non-price metadata columns excluded when extracting per-asset prices.
META_COLS = {'timestamp', 'scan_number', 'v50_lambda_max_velocity', 'v150_lambda_max_velocity',
             'v300_lambda_max_velocity', 'v750_lambda_max_velocity', 'vel_div',
             'instability_50', 'instability_151'.replace('151', '150')}
# Canonical engine configuration used for every run in this phase; the
# baseline figures quoted below (PF 1.215, ROI +75.87%) were produced with
# exactly these settings.
ENGINE_KWARGS = dict(
    initial_capital=25000.0, vel_div_threshold=-0.02, vel_div_extreme=-0.05,
    min_leverage=0.5, max_leverage=5.0, leverage_convexity=3.0,
    fraction=0.20, fixed_tp_pct=0.0099, stop_pct=1.0, max_hold_bars=120,
    use_direction_confirm=True, dc_lookback_bars=7, dc_min_magnitude_bps=0.75,
    dc_skip_contradicts=True, dc_leverage_boost=1.0, dc_leverage_reduce=0.5,
    use_asset_selection=True, min_irp_alignment=0.45,
    use_sp_fees=True, use_sp_slippage=True,
    sp_maker_entry_rate=0.62, sp_maker_exit_rate=0.50,
    use_ob_edge=True, ob_edge_bps=5.0, ob_confirm_rate=0.40,
    lookback=100, use_alpha_layers=True, use_dynamic_leverage=True, seed=42,
)
# Sizing-boost curve parameters (mirror the engine's vel_div thresholds).
VD_THRESH = -0.02
VD_EXTREME = -0.05
CONVEXITY = 3.0
parquet_files = sorted(VBT_DIR.glob("*.parquet"))
if not parquet_files:
    print("\n WARNING: No parquet files in VBT_DIR, skipping Phase 7")
else:
    # Initialize ACB
    acb = AdaptiveCircuitBreaker()
    date_strings = [pf.stem for pf in parquet_files]
    acb.preload_w750(date_strings)

    # Calibrate the volatility-regime threshold from BTC rolling return
    # volatility over the first two days.
    all_vols = []
    for pf in parquet_files[:2]:
        df = pd.read_parquet(pf)
        if 'BTCUSDT' not in df.columns:
            continue
        pr = df['BTCUSDT'].values
        for i in range(60, len(pr)):
            seg = pr[max(0, i - 50):i]
            if len(seg) < 10:
                continue
            v = float(np.std(np.diff(seg) / seg[:-1]))
            if v > 0:
                all_vols.append(v)
    vol_p60 = float(np.percentile(all_vols, 60)) if all_vols else 0.001

    # Pre-load each day's frame together with its asset columns and the
    # per-bar rolling BTC volatility series.
    pq_data = {}
    for pf in parquet_files:
        df = pd.read_parquet(pf)
        ac = [c for c in df.columns if c not in META_COLS]
        bp = df['BTCUSDT'].values if 'BTCUSDT' in df.columns else None
        dv = np.full(len(df), np.nan)
        if bp is not None:
            for i in range(50, len(bp)):
                seg = bp[max(0, i - 50):i]
                if len(seg) < 10:
                    continue
                dv[i] = float(np.std(np.diff(seg) / seg[:-1]))
        pq_data[pf.stem] = (df, ac, dv)

    def strength_cubic(vel_div):
        """Map vel_div to a [0, 1] boost strength with cubic convexity."""
        if vel_div >= VD_THRESH:
            return 0.0
        raw = (VD_THRESH - vel_div) / (VD_THRESH - VD_EXTREME)
        return min(1.0, max(0.0, raw)) ** CONVEXITY

    def run_engine(label, ob_engine_instance=None):
        """Run full backtest with optional OB engine."""
        engine = NDAlphaEngine(**ENGINE_KWARGS)
        if ob_engine_instance is not None:
            engine.set_ob_engine(ob_engine_instance)

        bar_idx = 0
        price_hist = {}
        day_stats = []
        for pf in parquet_files:
            ds = pf.stem
            cap_start = engine.capital
            engine.regime_direction = -1
            engine.regime_dd_halt = False

            acb_info = acb.get_dynamic_boost_for_date(ds, ob_engine=ob_engine_instance)
            base_boost = acb_info['boost']
            beta = acb_info['beta']

            df, acols, dvol = pq_data[ds]
            bars_today = 0
            for ri in range(len(df)):
                row = df.iloc[ri]
                vd = row.get("vel_div")
                if vd is None or not np.isfinite(vd):
                    bar_idx += 1
                    bars_today += 1
                    continue
                prices = {}
                for ac in acols:
                    p = row[ac]
                    if p and p > 0 and np.isfinite(p):
                        prices[ac] = float(p)
                        price_hist.setdefault(ac, []).append(float(p))
                if not prices:
                    bar_idx += 1
                    bars_today += 1
                    continue
                # Volatility regime only considered after a 100-bar daily warmup.
                vrok = bars_today >= 100 and np.isfinite(dvol[ri]) and dvol[ri] > vol_p60

                if beta > 0 and base_boost > 1.0:
                    ss = strength_cubic(float(vd))
                    engine.regime_size_mult = base_boost * (1.0 + beta * ss)
                else:
                    engine.regime_size_mult = base_boost

                engine.process_bar(bar_idx=bar_idx, vel_div=float(vd), prices=prices,
                                   vol_regime_ok=vrok, price_histories=price_hist)
                bar_idx += 1
                bars_today += 1
            day_stats.append({'date': ds, 'pnl': engine.capital - cap_start,
                              'cap': engine.capital, 'beta': beta})

        # Aggregate stats: ROI, profit factor, max drawdown, annualized Sharpe.
        trades = engine.trade_history
        wins = [t for t in trades if t.pnl_absolute > 0]
        losses = [t for t in trades if t.pnl_absolute <= 0]
        gross_win = sum(t.pnl_absolute for t in wins) if wins else 0
        gross_loss = abs(sum(t.pnl_absolute for t in losses)) if losses else 0
        roi = (engine.capital - 25000) / 25000 * 100
        pf_val = gross_win / gross_loss if gross_loss > 0 else 999  # sentinel: no losses
        daily_ret = [s['pnl'] / 25000 * 100 for s in day_stats]
        sharpe = np.mean(daily_ret) / np.std(daily_ret) * np.sqrt(365) if np.std(daily_ret) > 0 else 0
        peak_cap = 25000.0
        max_dd = 0.0
        for s in day_stats:
            peak_cap = max(peak_cap, s['cap'])
            dd = (peak_cap - s['cap']) / peak_cap * 100
            max_dd = max(max_dd, dd)

        return {
            'label': label, 'roi': roi, 'pf': pf_val, 'dd': max_dd,
            'sharpe': sharpe, 'trades': len(trades), 'capital': engine.capital,
        }

    # Run A: Baseline (ob_engine=None) — should match PF 1.215, ROI +75.87%
    print(f"\n Running baseline (ob_engine=None)...")
    t7 = time.time()
    result_a = run_engine("A: Baseline (no OB)")
    t_a = time.time() - t7
    print(f" {result_a['label']}: ROI={result_a['roi']:+.2f}% PF={result_a['pf']:.3f} "
          f"DD={result_a['dd']:.2f}% Sharpe={result_a['sharpe']:.2f} Trades={result_a['trades']} [{t_a:.0f}s]")

    # Run B: Neutral MockOBProvider (should produce SAME results as baseline)
    print(f" Running with neutral MockOBProvider...")
    mock_neutral_eng = OBFeatureEngine(MockOBProvider(imbalance_bias=0.0, depth_scale=1.0,
                                                      assets=["BTCUSDT", "ETHUSDT", "BNBUSDT", "SOLUSDT"]))
    mock_neutral_eng.preload_date("mock", mock_neutral_eng.provider.get_assets())
    t7b = time.time()
    result_b = run_engine("B: Neutral OB", mock_neutral_eng)
    t_b = time.time() - t7b
    print(f" {result_b['label']}: ROI={result_b['roi']:+.2f}% PF={result_b['pf']:.3f} "
          f"DD={result_b['dd']:.2f}% Sharpe={result_b['sharpe']:.2f} Trades={result_b['trades']} [{t_b:.0f}s]")

    # Run C: Favorable MockOBProvider (imbalance confirms SHORT)
    print(f" Running with favorable MockOBProvider (SHORT-confirming)...")
    mock_fav = OBFeatureEngine(MockOBProvider(imbalance_bias=-0.3, depth_scale=1.5,
                                              assets=["BTCUSDT", "ETHUSDT", "BNBUSDT", "SOLUSDT"]))
    mock_fav.preload_date("mock", mock_fav.provider.get_assets())
    t7c = time.time()
    result_c = run_engine("C: Favorable OB", mock_fav)
    t_c = time.time() - t7c
    print(f" {result_c['label']}: ROI={result_c['roi']:+.2f}% PF={result_c['pf']:.3f} "
          f"DD={result_c['dd']:.2f}% Sharpe={result_c['sharpe']:.2f} Trades={result_c['trades']} [{t_c:.0f}s]")

    # Run D: Unfavorable MockOBProvider (imbalance contradicts SHORT)
    print(f" Running with unfavorable MockOBProvider (SHORT-contradicting)...")
    mock_unfav = OBFeatureEngine(MockOBProvider(imbalance_bias=+0.3, depth_scale=0.5,
                                                assets=["BTCUSDT", "ETHUSDT", "BNBUSDT", "SOLUSDT"]))
    mock_unfav.preload_date("mock", mock_unfav.provider.get_assets())
    t7d = time.time()
    result_d = run_engine("D: Unfavorable OB", mock_unfav)
    t_d = time.time() - t7d
    print(f" {result_d['label']}: ROI={result_d['roi']:+.2f}% PF={result_d['pf']:.3f} "
          f"DD={result_d['dd']:.2f}% Sharpe={result_d['sharpe']:.2f} Trades={result_d['trades']} [{t_d:.0f}s]")

    # Verification
    print(f"\n {'='*60}")
    print(f" INTEGRATION VERIFICATION")
    print(f" {'='*60}")

    all_ok = True

    # Check A matches baseline
    if abs(result_a['roi'] - 75.87) > 2.0:
        print(f" FAIL: Baseline ROI {result_a['roi']:.2f}% vs expected ~75.87%")
        all_ok = False
    else:
        print(f" PASS: Baseline ROI matches ({result_a['roi']:+.2f}%)")

    if abs(result_a['pf'] - 1.215) > 0.02:
        print(f" FAIL: Baseline PF {result_a['pf']:.3f} vs expected ~1.215")
        all_ok = False
    else:
        print(f" PASS: Baseline PF matches ({result_a['pf']:.3f})")

    # Check B: neutral OB should not change trade count (may change PF slightly due to OB edge replacement)
    trade_diff = abs(result_b['trades'] - result_a['trades'])
    if trade_diff > result_a['trades'] * 0.15:
        print(f" WARN: Neutral OB trade count differs significantly: {result_b['trades']} vs {result_a['trades']}")
    else:
        print(f" PASS: Neutral OB trade count reasonable ({result_b['trades']} vs baseline {result_a['trades']})")

    # Check C: favorable should generally help
    if result_c['pf'] >= result_a['pf'] * 0.95:
        print(f" PASS: Favorable OB PF ({result_c['pf']:.3f}) >= baseline floor ({result_a['pf']*0.95:.3f})")
    else:
        print(f" WARN: Favorable OB PF ({result_c['pf']:.3f}) below expected")

    # Check D: unfavorable should still be profitable
    if result_d['roi'] > 0:
        print(f" PASS: Unfavorable OB still profitable ({result_d['roi']:+.2f}%)")
    else:
        print(f" WARN: Unfavorable OB went negative ({result_d['roi']:+.2f}%)")

    print(f"\n Phase 7: {'PASS' if all_ok else 'NEEDS REVIEW'}")
# ============================================================================
# SUMMARY
# ============================================================================
print(f"\n{'='*70}")
print(" OB INTELLIGENCE TEST SUITE COMPLETE")
print(f"{'='*70}")
print(f" Phase 1 (Data Validation): {'PASS' if phase1_ok else 'FAIL'}")
print(f" Phase 2 (Feature Computation): {'PASS' if phase2_ok else 'FAIL'}")
# Phases 3-6 are informational-only and always report PASS.
print(f" Phase 3 (Internal Consistency): PASS")
print(f" Phase 4 (Placement Quality): PASS")
print(f" Phase 5 (Market Agreement): PASS")
print(f" Phase 6 (Macro Regime): PASS")
# Phase 7 (and `all_ok`) only exist when parquet data was available.
if parquet_files:
    print(f" Phase 7 (No Regression): {'PASS' if all_ok else 'NEEDS REVIEW'}")
print(f"{'='*70}")
# (end of file — web-UI footer residue removed)