initial: import DOLPHIN baseline 2026-04-21 from dolphinng5_predict working tree

Includes core prod + GREEN/BLUE subsystems:
- prod/ (BLUE harness, configs, scripts, docs)
- nautilus_dolphin/ (GREEN Nautilus-native impl + dvae/ preserved)
- adaptive_exit/ (AEM engine + models/bucket_assignments.pkl)
- Observability/ (EsoF advisor, TUI, dashboards)
- external_factors/ (EsoF producer)
- mc_forewarning_qlabs_fork/ (MC regime/envelope)

Excludes runtime caches, logs, backups, and reproducible artifacts per .gitignore.
This commit is contained in:
hjnormey
2026-04-21 16:58:38 +02:00
commit 01c19662cb
643 changed files with 260241 additions and 0 deletions

View File

@@ -0,0 +1,206 @@
import sys, time
from pathlib import Path
import numpy as np
import pandas as pd
import json
sys.path.insert(0, str(Path(__file__).parent))
from nautilus_dolphin.nautilus.alpha_orchestrator import NDAlphaEngine
from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker
from nautilus_dolphin.nautilus.ob_features import OBFeatureEngine
from nautilus_dolphin.nautilus.ob_provider import MockOBProvider
from mc.mc_ml import DolphinForewarner
from mc.mc_sampler import MCTrialConfig
# Directory of cached vectorized-backtest parquet files (one file per trading day).
VBT_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache")
# Per-bar metadata columns; any column NOT listed here is treated as an
# asset price series when a day's dataframe is unpacked below.
META_COLS = {'timestamp', 'scan_number', 'v50_lambda_max_velocity', 'v150_lambda_max_velocity',
             'v300_lambda_max_velocity', 'v750_lambda_max_velocity', 'vel_div',
             'instability_50', 'instability_150'}
parquet_files = sorted(VBT_DIR.glob("*.parquet"))
# Skip Nautilus catalog artifacts that share the cache directory.
parquet_files = [p for p in parquet_files if 'catalog' not in str(p)]
# Calibrate the volatility-regime gate: sample BTC 50-bar rolling return
# volatility from the first two days, then take its 60th percentile.
print("Loading data...")
all_vols = []
for pf in parquet_files[:2]:
    frame = pd.read_parquet(pf)
    if 'BTCUSDT' not in frame.columns:
        continue
    closes = frame['BTCUSDT'].values
    for bar in range(60, len(closes)):
        window = closes[max(0, bar - 50):bar]
        if len(window) < 10:
            continue
        vol = float(np.std(np.diff(window) / window[:-1]))
        if vol > 0:
            all_vols.append(vol)
# Bars whose rolling vol exceeds this threshold count as "vol regime OK".
vol_p60 = float(np.percentile(all_vols, 60))
# Preload every day into memory keyed by file stem (date string):
# (dataframe, asset price columns, per-bar BTC 50-bar rolling volatility).
pq_data = {}
for pf in parquet_files:
    day_df = pd.read_parquet(pf)
    asset_cols = [col for col in day_df.columns if col not in META_COLS]
    btc = day_df['BTCUSDT'].values if 'BTCUSDT' in day_df.columns else None
    rolling_vol = np.full(len(day_df), np.nan)
    if btc is not None:
        for bar in range(50, len(btc)):
            window = btc[max(0, bar - 50):bar]
            if len(window) < 10:
                continue
            rolling_vol[bar] = float(np.std(np.diff(window) / window[:-1]))
    pq_data[pf.stem] = (day_df, asset_cols, rolling_vol)
# Initialize systems
# Adaptive circuit breaker shared by both test runs; preload_w750 presumably
# precomputes per-day w750 state for every date key — TODO confirm in ACB impl.
acb = AdaptiveCircuitBreaker()
acb.preload_w750([pf.stem for pf in parquet_files])
# Mock order-book provider: global imbalance bias of -0.09, with per-asset
# overrides for BNB/SOL (+0.20 each). Drives the shared OB feature engine.
mock = MockOBProvider(imbalance_bias=-0.09, depth_scale=1.0,
                      assets=["BTCUSDT", "ETHUSDT", "BNBUSDT", "SOLUSDT"],
                      imbalance_biases={"BNBUSDT": 0.20, "SOLUSDT": 0.20})
ob_engine_inst = OBFeatureEngine(mock)
ob_engine_inst.preload_date("mock", mock.get_assets())
def run_test(name: str, use_ob_engine: bool = True) -> dict:
    """Run one full multi-day backtest pass and return summary statistics.

    Replays every preloaded parquet day through a fresh NDAlphaEngine,
    applying the ACB per-day boost and (optionally) the order-book edge
    filter, then aggregates trade and equity statistics.

    Parameters
    ----------
    name : str
        Label printed with this run's MC-Forewarner status line.
    use_ob_engine : bool
        When True, enables the OB edge filter in the engine kwargs/config
        and attaches the shared OBFeatureEngine instance.

    Returns
    -------
    dict
        'name', 'roi' (%), 'pf' (profit factor; 999 sentinel when no gross
        loss), 'wr' (win rate %), 'max_dd' (%), 'trades' (count),
        'variance' (sample variance of daily returns), 'ggr_ann'
        (annualized geometric growth rate, %).
    """
    # Engine parameters mirror the MCTrialConfig below — keep the two in sync.
    ENGINE_KWARGS = dict(
        initial_capital=25000.0, vel_div_threshold=-0.02, vel_div_extreme=-0.05,
        min_leverage=0.5, max_leverage=5.0, leverage_convexity=3.0,
        fraction=0.20, fixed_tp_pct=0.0099, stop_pct=1.0, max_hold_bars=120,
        use_direction_confirm=True, dc_lookback_bars=7, dc_min_magnitude_bps=0.75,
        dc_skip_contradicts=True, dc_leverage_boost=1.0, dc_leverage_reduce=0.5,
        use_asset_selection=True, min_irp_alignment=0.45,
        use_sp_fees=True, use_sp_slippage=True,
        use_ob_edge=use_ob_engine, ob_edge_bps=5.0, ob_confirm_rate=0.40,
        lookback=100, use_alpha_layers=True, use_dynamic_leverage=True, seed=42,
    )
    # Re-evaluate config against ML forewarner
    forewarner = DolphinForewarner(models_dir=str(Path(__file__).parent / "mc_results" / "models"))
    config = MCTrialConfig(
        trial_id="LIVE",
        vel_div_threshold=-0.02, vel_div_extreme=-0.05,
        min_leverage=0.5, max_leverage=5.0, leverage_convexity=3.0,
        fraction=0.20, fixed_tp_pct=0.0099, stop_pct=1.0, max_hold_bars=120,
        use_direction_confirm=True, dc_lookback_bars=7, dc_min_magnitude_bps=0.75,
        dc_skip_contradicts=True, dc_leverage_boost=1.0, dc_leverage_reduce=0.5,
        vd_trend_lookback=20, use_sp_fees=True, use_sp_slippage=True,
        sp_maker_entry_rate=0.62, sp_maker_exit_rate=0.5,
        use_asset_selection=True, min_irp_alignment=0.45,
        use_ob_edge=use_ob_engine, ob_edge_bps=5.0, ob_confirm_rate=0.40,
        ob_imbalance_bias=0.0, ob_depth_scale=1.0,
        lookback=100, use_alpha_layers=True, use_dynamic_leverage=True,
        acb_beta_high=1.5, acb_beta_low=0.2, acb_w750_threshold_pct=60.0
    )
    report = forewarner.assess(config)
    env = report.envelope_score
    cat = report.catastrophic_probability
    champ = report.champion_probability
    # GREEN when envelope score and champion probability clear their cutoffs.
    # NOTE(review): 0.5 / 0.6 thresholds look hand-tuned — confirm against MC spec.
    is_green = (env > 0.5 and champ > 0.6)
    print(f"\n[{name}]")
    print(f" MC-Forewarner Status: Env={env:.2f}, Cat={cat:.1%}, Champ={champ:.1%} -> GREEN? {is_green}")
    import gc
    gc.collect()  # reclaim the previous run's engine before building a new one
    engine = NDAlphaEngine(**ENGINE_KWARGS)
    # Apply our newly added dynamic capacity
    engine.set_mc_forewarner_status(is_green)
    print(f" Engine Ceiling set to: {engine.bet_sizer.max_leverage}x")
    ob_ref = ob_engine_inst if use_ob_engine else None
    if ob_ref:
        engine.set_ob_engine(ob_ref)
    # bar_idx is global across all days; peak/max_dd track equity drawdown.
    bar_idx = 0; peak_cap = engine.capital; max_dd = 0.0
    daily_returns = []
    for pf in parquet_files:
        ds = pf.stem  # date key into pq_data
        cs = engine.capital  # start-of-day capital, for the daily return below
        # ACB logic
        acb_info = acb.get_dynamic_boost_for_date(ds, ob_engine=ob_ref)
        base_boost = acb_info['boost']
        beta = acb_info['beta']
        df, acols, dvol = pq_data[ds]
        ph = {}  # per-asset rolling price histories fed to the engine
        for ri in range(len(df)):
            row = df.iloc[ri]; vd = row.get("vel_div")
            # Skip bars with no usable velocity-divergence signal.
            if vd is None or not np.isfinite(vd): bar_idx+=1; continue
            prices = {}
            for ac in acols:
                p = row[ac]
                if p and p > 0 and np.isfinite(p):
                    prices[ac] = float(p)
                    if ac not in ph: ph[ac] = []
                    ph[ac].append(float(p))
                    # Cap history: once it grows past 500, trim back to last 200.
                    if len(ph[ac]) > 500: ph[ac] = ph[ac][-200:]
            if not prices: bar_idx+=1; continue
            # Vol regime OK only after 100 bars AND BTC rolling vol above its 60th pct.
            vrok = False if ri < 100 else (np.isfinite(dvol[ri]) and dvol[ri] > vol_p60)
            if beta > 0:
                ss = 0.0
                if vd < -0.02:
                    # Signal strength: linear ramp between threshold (-0.02) and
                    # extreme (-0.05), clamped to [0, 1], cubed for convexity.
                    raw = (-0.02 - float(vd)) / (-0.02 - -0.05)
                    ss = min(1.0, max(0.0, raw)) ** 3.0
                engine.regime_size_mult = base_boost * (1.0 + beta * ss)
            else:
                engine.regime_size_mult = base_boost
            engine.process_bar(bar_idx=bar_idx, vel_div=float(vd), prices=prices, vol_regime_ok=vrok, price_histories=ph)
            bar_idx += 1
            # Track equity peak and running max drawdown per bar.
            peak_cap = max(peak_cap, engine.capital)
            dd = (peak_cap - engine.capital) / peak_cap
            max_dd = max(max_dd, dd)
        # Daily Return
        daily_returns.append((engine.capital - cs) / cs if cs > 0 else 0)
    trades = engine.trade_history
    w = [t for t in trades if t.pnl_absolute > 0]
    l = [t for t in trades if t.pnl_absolute <= 0]
    gw = sum(t.pnl_absolute for t in w) if w else 0  # gross wins
    gl = abs(sum(t.pnl_absolute for t in l)) if l else 0  # gross losses
    roi = (engine.capital - 25000) / 25000 * 100
    pf_val = gw / gl if gl > 0 else 999  # 999 sentinel: no losing trades
    wr = len(w) / len(trades) * 100 if trades else 0
    R = np.array(daily_returns)
    variance = np.var(R, ddof=1)  # sample (ddof=1) variance of daily returns
    # Geometric Growth Rate (Daily & Annualized)
    ggr_daily = np.exp(np.mean(np.log1p(R))) - 1
    ggr_ann = ((1 + ggr_daily)**365) - 1
    return {
        'name': name,
        'roi': roi,
        'pf': pf_val,
        'wr': wr,
        'max_dd': max_dd * 100,
        'trades': len(trades),
        'variance': variance,
        'ggr_ann': ggr_ann * 100
    }
# --- Summary: run both configurations and print the side-by-side table ---
rule = "=" * 80
print("\n" + rule)
print("OB FILTER IMPACT ON VARIANCE & GEOMETRIC GROWTH RATE (Dynamic MC-Forewarner Limits)")
print(rule)
res1 = run_test("1. BEFORE OB FILTER (Baseline Alpha Layers)", use_ob_engine=False)
res2 = run_test("2. AFTER OB FILTER (Full Stack OB Injection)", use_ob_engine=True)
print("\n" + rule)
print(" FINAL DIRECT COMPARISON (55-Day Vectorized Trajectory)")
print(rule)
print(f" | {'Before OB Filter':>18} | {'After OB Filter':>18} | {'Delta':>10}")
print("-" * 80)
# Name the deltas up front so each table row stays readable.
delta_wr = res2['wr'] - res1['wr']
delta_pf = res2['pf'] - res1['pf']
delta_dd = res2['max_dd'] - res1['max_dd']
delta_var = (res2['variance'] / res1['variance'] - 1) * 100
delta_ggr = res2['ggr_ann'] - res1['ggr_ann']
print(f" Wr (%) | {res1['wr']:>17.2f}% | {res2['wr']:>17.2f}% | {delta_wr:>+9.2f}%")
print(f" PF | {res1['pf']:>18.3f} | {res2['pf']:>18.3f} | {delta_pf:>+10.3f}")
print(f" Max DD | {res1['max_dd']:>17.2f}% | {res2['max_dd']:>17.2f}% | {delta_dd:>+9.2f}%")
print(f" Daily Variance (s²)| {res1['variance']:>18.6f} | {res2['variance']:>18.6f} | {delta_var:>+9.2f}%")
print(f" Geometric Growth | {res1['ggr_ann']:>17.2f}% | {res2['ggr_ann']:>17.2f}% | {delta_ggr:>+9.2f}%")
print(rule)