# Source: DOLPHIN/nautilus_dolphin/debug_dd_curve.py (180 lines, 7.7 KiB, Python)
# NOTE(review): the lines above this file's docstring were web-viewer page
# chrome ("Files / Raw / Normal View / History"); condensed into this comment
# so the module parses.
"""DD curve analysis — identify where 32.35% drawdown peaks occur."""
import sys, time
import numpy as np
import pandas as pd
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent))
from nautilus_dolphin.nautilus.ob_features import OBFeatureEngine
from nautilus_dolphin.nautilus.ob_provider import MockOBProvider
from nautilus_dolphin.nautilus.esf_alpha_orchestrator import NDAlphaEngine
from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker
from mc.mc_ml import DolphinForewarner
VBT_DIR = Path(r'C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache')
META_COLS = {'vel_div','timestamp','bar_index','date'}
VD_THRESH, VD_EXTREME, CONVEXITY = -0.02, -0.05, 3.0
ENGINE_KWARGS = dict(
initial_capital=25000.0, vel_div_threshold=-0.02, vel_div_extreme=-0.05,
fraction=0.20, min_leverage=0.5, max_leverage=5.0,
leverage_convexity=3.0, fixed_tp_pct=0.0099, stop_pct=1.0, max_hold_bars=120,
use_direction_confirm=True, dc_lookback_bars=7, dc_min_magnitude_bps=0.75,
dc_skip_contradicts=True, dc_leverage_boost=1.0, dc_leverage_reduce=0.5,
use_asset_selection=True, min_irp_alignment=0.45,
use_sp_fees=True, use_sp_slippage=True,
sp_maker_entry_rate=0.62, sp_maker_exit_rate=0.50,
use_ob_edge=True, ob_edge_bps=5.0, ob_confirm_rate=0.40,
lookback=100, use_alpha_layers=True, use_dynamic_leverage=True, seed=42,
)
# Baseline config dict handed to the Monte-Carlo forewarner each date;
# mirrors ENGINE_KWARGS plus OB/ACB knobs. max_leverage is overridden
# per date before the assessment call.
MC_BASE_CFG = dict(
    trial_id=0,
    # Signal thresholds and direction-confirm layer
    vel_div_threshold=-0.020,
    vel_div_extreme=-0.050,
    use_direction_confirm=True,
    dc_lookback_bars=7,
    dc_min_magnitude_bps=0.75,
    dc_skip_contradicts=True,
    dc_leverage_boost=1.00,
    dc_leverage_reduce=0.50,
    vd_trend_lookback=10,
    # Sizing / leverage
    min_leverage=0.50,
    max_leverage=5.00,
    leverage_convexity=3.00,
    fraction=0.20,
    use_alpha_layers=True,
    use_dynamic_leverage=True,
    # Exits
    fixed_tp_pct=0.0099,
    stop_pct=1.00,
    max_hold_bars=120,
    # Fees / slippage
    use_sp_fees=True,
    use_sp_slippage=True,
    sp_maker_entry_rate=0.62,
    sp_maker_exit_rate=0.50,
    # Order-book edge filter
    use_ob_edge=True,
    ob_edge_bps=5.00,
    ob_confirm_rate=0.40,
    ob_imbalance_bias=-0.09,
    ob_depth_scale=1.00,
    # Asset selection / adaptive circuit breaker
    use_asset_selection=True,
    min_irp_alignment=0.45,
    lookback=100,
    acb_beta_high=0.80,
    acb_beta_low=0.20,
    acb_w750_threshold_pct=60,
)
# ML-based risk forewarner; loads its models from the local mc_results dir.
forewarner = DolphinForewarner(models_dir=str(Path('mc_results/models')))
# One parquet per trading date, sorted by name; files whose path contains
# 'catalog' are excluded from the replay set.
parquet_files = sorted([p for p in VBT_DIR.glob('*.parquet') if 'catalog' not in str(p)])
# Adaptive circuit breaker; preload_w750 is fed the date stems — presumably
# it warms per-date state ahead of the replay loop. TODO confirm against
# AdaptiveCircuitBreaker's implementation.
acb = AdaptiveCircuitBreaker()
acb.preload_w750([pf.stem for pf in parquet_files])
# --- Volatility baseline --------------------------------------------------
# Defect fixed: the pasted source lost all indentation (SyntaxError);
# structure restored, logic unchanged. Samples the std-dev of 50-bar
# BTCUSDT simple returns over the first two dates; vol_p60 (the 60th
# percentile) later gates the per-bar vol-regime filter.
# NOTE(review): assumes at least one of the first two files has a BTCUSDT
# column — np.percentile raises on an empty sample otherwise.
all_vols = []
for pf in parquet_files[:2]:
    df = pd.read_parquet(pf)
    if 'BTCUSDT' not in df.columns:
        continue
    pr = df['BTCUSDT'].values
    for i in range(60, len(pr)):
        seg = pr[max(0, i - 50):i]
        if len(seg) < 10:
            continue
        v = float(np.std(np.diff(seg) / seg[:-1]))
        if v > 0:
            all_vols.append(v)
vol_p60 = float(np.percentile(all_vols, 60))
# --- Preload per-date data ------------------------------------------------
# Defect fixed: lost indentation (SyntaxError); structure restored, logic
# unchanged. For each date: the raw frame, its asset columns, and a rolling
# 50-bar BTCUSDT return-vol series (NaN where BTC is absent or the window
# is still warming up), keyed by the file stem (the date string).
pq_data = {}
for pf in parquet_files:
    df = pd.read_parquet(pf)
    ac = [c for c in df.columns if c not in META_COLS]
    bp = df['BTCUSDT'].values if 'BTCUSDT' in df.columns else None
    dv = np.full(len(df), np.nan)
    if bp is not None:
        for i in range(50, len(bp)):
            seg = bp[max(0, i - 50):i]
            if len(seg) < 10:
                continue
            dv[i] = float(np.std(np.diff(seg) / seg[:-1]))
    pq_data[pf.stem] = (df, ac, dv)
def strength_cubic(vel_div, thresh=None, extreme=None, convexity=None):
    """Map a velocity-divergence reading to a sizing strength in [0, 1].

    Readings at or above *thresh* yield 0.0. Below it, the strength ramps
    linearly toward 1.0 at *extreme*, is clamped to [0, 1], then raised to
    the *convexity* power so only deeply negative readings size up hard.

    Defects fixed: the pasted body had lost its indentation (SyntaxError);
    the module constants are now overridable via backward-compatible
    keyword parameters (defaults preserve the original behavior).

    Args:
        vel_div: velocity-divergence value; more negative = stronger signal.
        thresh: activation threshold; defaults to module VD_THRESH.
        extreme: full-strength level; defaults to module VD_EXTREME.
        convexity: exponent on the clamped ramp; defaults to module CONVEXITY.

    Returns:
        float in [0.0, 1.0].
    """
    thresh = VD_THRESH if thresh is None else thresh
    extreme = VD_EXTREME if extreme is None else extreme
    convexity = CONVEXITY if convexity is None else convexity
    if vel_div >= thresh:
        return 0.0
    raw = (thresh - vel_div) / (thresh - extreme)
    return min(1.0, max(0.0, raw)) ** convexity
# Assets for which synthetic order-book features are generated.
OB_ASSETS = ['BTCUSDT','ETHUSDT','BNBUSDT','SOLUSDT']
# Mock order-book provider: global imbalance bias -0.09 with per-asset
# overrides (BTC/ETH negative, BNB/SOL positive). These are study
# parameters of the mock, not measured market data.
_mock_ob = MockOBProvider(
imbalance_bias=-0.09, depth_scale=1.0, assets=OB_ASSETS,
imbalance_biases={'BTCUSDT': -0.086, 'ETHUSDT': -0.092,
'BNBUSDT': +0.05, 'SOLUSDT': +0.05},
)
# Feature engine over the mock book; preload_date('mock', ...) is called
# with a synthetic date key — presumably pre-caches per-asset features.
# TODO confirm against OBFeatureEngine.
ob_eng = OBFeatureEngine(_mock_ob)
ob_eng.preload_date('mock', OB_ASSETS)
# The alpha engine under study, wired to the OB feature engine. The
# esoteric hazard multiplier is set to 0.0 — NOTE(review): presumably this
# disables that hazard layer; confirm in NDAlphaEngine.
engine = NDAlphaEngine(**ENGINE_KWARGS)
engine.set_ob_engine(ob_eng)
engine.set_esoteric_hazard_multiplier(0.0)
# --- Replay every date through the engine ---------------------------------
# Defect fixed: the pasted loop had lost all indentation (SyntaxError);
# nesting reconstructed from the control flow, logic unchanged.
bar_idx = 0   # global bar counter across all dates
ph = {}       # rolling per-asset price histories fed to the engine
dstats = []   # one summary row per date, consumed by the DD report below
for pf in parquet_files:
    ds = pf.stem
    cap_start = engine.capital

    # Reset per-day regime state; the study runs direction -1 throughout.
    engine.regime_direction = -1
    engine.regime_dd_halt = False

    # Adaptive circuit breaker: per-date leverage boost and beta.
    acb_info = acb.get_dynamic_boost_for_date(ds, ob_engine=ob_eng)
    base_boost = acb_info['boost']
    beta = acb_info['beta']
    eff_max_lev = float(ENGINE_KWARGS['max_leverage']) * base_boost

    # Monte-Carlo forewarner on the boosted config: "red" halts the whole
    # day (regime_dd_halt), "orange" halves position sizing.
    mc_cfg = dict(MC_BASE_CFG)
    mc_cfg['max_leverage'] = eff_max_lev
    mc_report = forewarner.assess_config_dict(mc_cfg)
    mc_red = mc_report.catastrophic_probability > 0.25 or mc_report.envelope_score < -1.0
    mc_orange = (not mc_red) and (mc_report.envelope_score < 0 or mc_report.catastrophic_probability > 0.10)
    mc_size_scale = 0.5 if mc_orange else 1.0
    if mc_red:
        engine.regime_dd_halt = True

    df, acols, dvol = pq_data[ds]
    day_idx_before = len(engine.trade_history)
    bid = 0  # bar index within the day; vol filter warms up for 100 bars
    for ri in range(len(df)):
        row = df.iloc[ri]
        vd = row.get('vel_div')
        if vd is None or not np.isfinite(vd):
            bar_idx += 1
            bid += 1
            continue

        # Collect valid positive finite prices and extend rolling histories.
        prices = {}
        for ac in acols:
            p = row[ac]
            if p and p > 0 and np.isfinite(p):
                prices[ac] = float(p)
                if ac not in ph:
                    ph[ac] = []
                ph[ac].append(float(p))
                if len(ph[ac]) > 500:  # cap memory: trim back to last 200
                    ph[ac] = ph[ac][-200:]
        if not prices:
            bar_idx += 1
            bid += 1
            continue

        # Vol-regime OK only after warm-up and when BTC vol exceeds baseline.
        vrok = False if bid < 100 else (np.isfinite(dvol[ri]) and dvol[ri] > vol_p60)

        # Size multiplier: ACB boost, optionally scaled up by the cubic
        # signal strength when beta is active, then MC risk scaling.
        if beta > 0:
            ss = strength_cubic(float(vd))
            engine.regime_size_mult = base_boost * (1.0 + beta * ss) * mc_size_scale
        else:
            engine.regime_size_mult = base_boost * mc_size_scale

        engine.process_bar(bar_idx=bar_idx, vel_div=float(vd), prices=prices,
                           vol_regime_ok=vrok, price_histories=ph)
        bar_idx += 1
        bid += 1

    # Per-day trade summary (wins strictly positive; zero P&L counts as loss).
    day_trades = engine.trade_history[day_idx_before:]
    dw = [t for t in day_trades if t.pnl_absolute > 0]
    dl = [t for t in day_trades if t.pnl_absolute <= 0]
    avg_loss = float(np.mean([t.pnl_pct for t in dl]) * 100) if dl else 0.0
    avg_win = float(np.mean([t.pnl_pct for t in dw]) * 100) if dw else 0.0
    dstats.append({
        'date': ds, 'pnl': engine.capital - cap_start, 'cap': engine.capital,
        'boost': base_boost, 'beta': beta, 'eff_lev': eff_max_lev,
        'trades': len(day_trades), 'wins': len(dw), 'losses': len(dl),
        'avg_win': avg_win, 'avg_loss': avg_loss,
    })
# --- Build DD curve -------------------------------------------------------
# Defect fixed: lost indentation (SyntaxError); loop restored and extracted
# into a testable helper, logic unchanged.
def _drawdown_curve(stats, start_capital=25000.0):
    """Percent drawdown from the running equity peak, one value per day.

    Args:
        stats: sequence of dicts each carrying 'cap' (end-of-day capital).
        start_capital: seeds the peak so a losing first day registers a DD.

    Returns:
        list[float]: drawdown percentages aligned with *stats*.
    """
    peak = start_capital
    curve = []
    for s in stats:
        peak = max(peak, s['cap'])
        curve.append((peak - s['cap']) / peak * 100)
    return curve

dd_curve = _drawdown_curve(dstats)
max_dd_idx = int(np.argmax(dd_curve))
# --- Report: per-date equity table, DD peak, worst days -------------------
# Defect fixed: lost indentation (SyntaxError); loops restored, output
# byte-identical to the original format strings.
print('\n=== PER-DATE EQUITY + DD CURVE ===')
print(f' {"Date":<12} {"Capital":>10} {"Daily P&L":>10} {"DD%":>7} {"Boost":>7} {"eLev":>6} {"T":>4} {"W/L":>7} {"AvgW%":>7} {"AvgL%":>7}')
for i, (s, dd) in enumerate(zip(dstats, dd_curve)):
    marker = ' <<< DD PEAK' if i == max_dd_idx else ''
    # Show all days with DD > 2% or large pnl swings (plus the DD peak day,
    # since a non-empty marker string is truthy).
    if dd > 2.0 or abs(s['pnl']) > 500 or marker:
        wl = f"{s['wins']}/{s['losses']}"
        print(f' {s["date"]:<12} {s["cap"]:>10,.0f} {s["pnl"]:>+10,.0f} {dd:>7.2f}% '
              f'{s["boost"]:>7.2f}x {s["eff_lev"]:>6.2f}x {s["trades"]:>4} {wl:>7} '
              f'{s["avg_win"]:>+7.3f} {s["avg_loss"]:>+7.3f}{marker}')
print(f'\n Peak DD: {dd_curve[max_dd_idx]:.2f}% on {dstats[max_dd_idx]["date"]}')
print(f' Final capital: ${engine.capital:,.2f} ROI: {(engine.capital-25000)/25000*100:+.2f}%')
print('\nWorst 10 daily P&L:')
worst = sorted(dstats, key=lambda x: x['pnl'])[:10]
for s in worst:
    print(f' {s["date"]}: P&L={s["pnl"]:+,.0f} boost={s["boost"]:.2f}x eLev={s["eff_lev"]:.2f}x T={s["trades"]} W/L={s["wins"]}/{s["losses"]} AvgL={s["avg_loss"]:+.3f}%')