initial: import DOLPHIN baseline 2026-04-21 from dolphinng5_predict working tree

Includes core prod + GREEN/BLUE subsystems:
- prod/ (BLUE harness, configs, scripts, docs)
- nautilus_dolphin/ (GREEN Nautilus-native impl + dvae/ preserved)
- adaptive_exit/ (AEM engine + models/bucket_assignments.pkl)
- Observability/ (EsoF advisor, TUI, dashboards)
- external_factors/ (EsoF producer)
- mc_forewarning_qlabs_fork/ (MC regime/envelope)

Excludes runtime caches, logs, backups, and reproducible artifacts per .gitignore.
This commit is contained in:
135
nautilus_dolphin/test_5s_replay_parity.py
Executable file
135
nautilus_dolphin/test_5s_replay_parity.py
Executable file
@@ -0,0 +1,135 @@
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from datetime import datetime, timezone
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
# Ensure we can import from the project root
|
||||
sys.path.insert(0, str(Path(__file__).parent))
|
||||
|
||||
# Import Dolphin components
|
||||
from nautilus_dolphin.nautilus.esf_alpha_orchestrator import NDAlphaEngine
|
||||
|
||||
# Mock configuration
|
||||
# Mock configuration shared by both parity paths.
# 'engine' is splatted into NDAlphaEngine(**CONFIG['engine']); the outer keys
# mirror the production runtime config shape.
CONFIG = {
    'engine': dict(
        initial_capital=25000.0,    # starting account equity
        vel_div_threshold=-0.02,    # velocity-divergence entry trigger
        vel_div_extreme=-0.05,      # extreme-divergence level
        max_leverage=5.0,
        use_alpha_layers=True,
        seed=42,                    # fixed seed so both paths are deterministic
    ),
    'live_mode': False,
    'direction': 'short_only',
}
|
||||
|
||||
def test_parity(vbt_dir=None):
    """Verify the batch and incremental Dolphin engine paths produce identical results.

    Loads the first cached parquet day, runs the legacy batch path
    (``NDAlphaEngine.process_day``) and the modern incremental path
    (``begin_day`` / ``step_bar`` / ``end_day``) on two independent engine
    instances built from the same CONFIG, then compares PnL and trade counts.

    Args:
        vbt_dir: Optional path to the parquet cache directory. Defaults to
            the hard-coded champion cache location (previous behavior).

    Returns:
        True when the two paths agree (PnL within 1e-6 and equal trade
        counts). Exits the process with status 1 on divergence or when no
        input data is found — a parity test must not silently pass.
    """
    print("=== DOLPHIN-NAUTILUS PARITY TEST ===")

    # 1. Setup Data (load one sample day)
    if vbt_dir is None:
        vbt_dir = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache")
    pq_files = sorted(Path(vbt_dir).glob("*.parquet"))
    if not pq_files:
        print("Error: No parquet files found in vbt_cache")
        # Missing data must fail the run; previously this returned None and
        # the script exited 0, falsely signalling success to CI.
        sys.exit(1)

    sample_file = pq_files[0]
    date_str = sample_file.stem
    print(f"Testing for date: {date_str} using {sample_file.name}")

    df = pd.read_parquet(sample_file)
    # Every column that is not a known feature/meta column is an asset price column.
    asset_columns = [c for c in df.columns if c not in {
        'timestamp', 'scan_number', 'v50_lambda_max_velocity', 'v150_lambda_max_velocity',
        'v300_lambda_max_velocity', 'v750_lambda_max_velocity', 'vel_div',
        'instability_50', 'instability_150'
    }]

    # Pre-calculate vol_ok (p60 threshold from dynamic_beta_validate)
    vol_p60 = 0.000099  # Hardcoded champion p60
    bp = df['BTCUSDT'].values if 'BTCUSDT' in df.columns else None
    dvol = np.full(len(df), np.nan)
    if bp is not None:
        # Rolling population std (ddof=0) of 1-bar returns over a 50-bar
        # lookback; kept as an explicit loop to match the champion exactly.
        for i in range(50, len(bp)):
            seg = bp[max(0, i - 50):i]
            if len(seg) >= 10:
                dvol[i] = float(np.std(np.diff(seg) / seg[:-1]))
    # NaN windows count as vol-regime-not-ok.
    vol_ok_arr = np.where(np.isfinite(dvol), dvol > vol_p60, False)

    # 2. Path A: Legacy Batch (NDAlphaEngine.process_day)
    print("\nRunning Path A: Legacy Batch logic...")
    engine_a = NDAlphaEngine(**CONFIG['engine'])
    engine_a.set_acb(None)

    batch_stats = engine_a.process_day(
        date_str=date_str,
        df=df,
        asset_columns=asset_columns,
        vol_regime_ok=vol_ok_arr,
        direction=-1  # short_only, matching CONFIG['direction']
    )
    print(f" Batch Result: PnL={batch_stats['pnl']:+.4f} Trades={batch_stats['trades']} Capital={batch_stats['capital']:.2f}")

    # 3. Path B: Modern Incremental (Direct NDAlphaEngine methods)
    print("\nRunning Path B: Modern Incremental logic (direct methods)...")
    engine_b = NDAlphaEngine(**CONFIG['engine'])
    engine_b.set_acb(None)

    # Orchestrate calls manually to match DolphinActor logic
    engine_b.begin_day(date_str, posture='APEX', direction=-1)

    for ri in range(len(df)):
        row = df.iloc[ri]
        vd = row.get('vel_div')
        if vd is None or not np.isfinite(float(vd)):
            engine_b._global_bar_idx += 1  # MUST increment to maintain parity with process_day logic
            continue

        # Missing/non-finite velocity features default to 0.0, as in batch.
        v50_raw = row.get('v50_lambda_max_velocity')
        v750_raw = row.get('v750_lambda_max_velocity')
        v50_val = float(v50_raw) if (v50_raw is not None and np.isfinite(float(v50_raw))) else 0.0
        v750_val = float(v750_raw) if (v750_raw is not None and np.isfinite(float(v750_raw))) else 0.0

        # Collect only strictly-positive, finite prices for this bar.
        prices = {}
        for ac in asset_columns:
            p = row.get(ac)
            if p is not None and p > 0 and np.isfinite(p):
                prices[ac] = float(p)

        if not prices:
            engine_b._global_bar_idx += 1  # MUST increment to maintain parity
            continue

        vol_ok = bool(vol_ok_arr[ri])

        # Step the bar (internally increments _global_bar_idx)
        engine_b.step_bar(
            bar_idx=ri,
            vel_div=float(vd),
            prices=prices,
            vol_regime_ok=vol_ok,
            v50_vel=v50_val,
            v750_vel=v750_val
        )

    incremental_stats = engine_b.end_day()
    print(f" Incremental Result: PnL={incremental_stats['pnl']:+.4f} Trades={incremental_stats['trades']} Capital={incremental_stats['capital']:.2f}")

    # 4. Parity Check
    pnl_diff = abs(batch_stats['pnl'] - incremental_stats['pnl'])
    trade_diff = abs(batch_stats['trades'] - incremental_stats['trades'])

    print("\n=== PARITY ANALYSIS ===")
    print(f" PnL Difference: {pnl_diff:.8f}")
    print(f" Trade Difference: {trade_diff}")

    success = pnl_diff < 1e-6 and trade_diff == 0
    if success:
        print("\nSUCCESS: Batch and Incremental logic paths are identical.")
    else:
        print("\nFAILURE: Logic divergence detected.")
        sys.exit(1)
    return success
|
||||
|
||||
# Script entry point: run the parity test when executed directly.
# A detected divergence (or missing input data) terminates with exit code 1.
if __name__ == "__main__":
    test_parity()
|
||||
Reference in New Issue
Block a user