"""Part 2 only — Adaptive Sharpe Monitor Prototype (55-day champion window). Part 1 (static fraction sweep) already complete. Results: frac=0.16: ROI=+44.90% PF=1.1559 DD=11.86% Sh=2.617 <- Best PF & Sharpe frac=0.18: ROI=+51.02% PF=1.1524 DD=13.39% Sh=2.554 frac=0.20: ROI=+57.18% PF=1.1487 DD=14.94% Sh=2.490 <- BASELINE frac=0.22: ROI=+63.38% PF=1.1450 DD=16.50% Sh=2.426 frac=0.24: ROI=+69.59% PF=1.1413 DD=18.07% Sh=2.361 frac=0.26: ROI=+75.79% PF=1.1376 DD=19.65% Sh=2.295 frac=0.28: ROI=+81.96% PF=1.1338 DD=21.24% Sh=2.230 frac=0.30: ROI=+88.08% PF=1.1301 DD=22.84% Sh=2.165 <- Best ROI Pattern: +0.02 fraction -> +6.2% ROI, +1.57% DD, -0.003 PF, -0.065 Sharpe Implication: higher static fraction buys ROI at cost of Sharpe/PF/DD. Adaptive monitor goal: get ROI of higher-fraction days WITHOUT the DD cost. Adaptive Sharpe Monitor: Rolling 20-day realized Sharpe -> fraction multiplier (EWMA smoothed). Elastic ceiling: soft=1.20x, apex=1.35x. Ceiling expands when ACB boost + MC GREEN + low drawdown align (mirrors leverage 5x->6x doctrine). """ import sys, time, json, csv sys.stdout.reconfigure(encoding='utf-8', errors='replace') from pathlib import Path from datetime import datetime import numpy as np import pandas as pd sys.path.insert(0, str(Path(__file__).parent)) print("Compiling numba kernels...") t0c = time.time() from nautilus_dolphin.nautilus.alpha_asset_selector import compute_irp_nb, compute_ars_nb, rank_assets_irp_nb from nautilus_dolphin.nautilus.alpha_bet_sizer import compute_sizing_nb from nautilus_dolphin.nautilus.alpha_signal_generator import check_dc_nb from nautilus_dolphin.nautilus.ob_features import ( OBFeatureEngine, compute_imbalance_nb, compute_depth_1pct_nb, compute_depth_quality_nb, compute_fill_probability_nb, compute_spread_proxy_nb, compute_depth_asymmetry_nb, compute_imbalance_persistence_nb, compute_withdrawal_velocity_nb, compute_market_agreement_nb, compute_cascade_signal_nb, ) from nautilus_dolphin.nautilus.ob_provider import MockOBProvider _p = np.array([1.0, 2.0, 3.0], dtype=np.float64) compute_irp_nb(_p, -1); compute_ars_nb(1.0, 0.5, 0.01) rank_assets_irp_nb(np.ones((10, 2), dtype=np.float64), 8, -1, 5, 500.0, 20, 0.20) compute_sizing_nb(-0.03, -0.02, -0.05, 3.0, 0.5, 5.0, 0.20, True, True, 0.0, np.zeros(4, dtype=np.int64), np.zeros(4, dtype=np.int64), np.zeros(5, dtype=np.float64), 0, -1, 0.01, 0.04) check_dc_nb(_p, 3, 1, 0.75) _b = np.array([100.0, 200.0, 300.0, 400.0, 500.0], dtype=np.float64) _a = np.array([110.0, 190.0, 310.0, 390.0, 510.0], dtype=np.float64) compute_imbalance_nb(_b, _a); compute_depth_1pct_nb(_b, _a) compute_depth_quality_nb(210.0, 200.0); compute_fill_probability_nb(1.0) compute_spread_proxy_nb(_b, _a); compute_depth_asymmetry_nb(_b, _a) compute_imbalance_persistence_nb(np.array([0.1, -0.1], dtype=np.float64), 2) compute_withdrawal_velocity_nb(np.array([100.0, 110.0], dtype=np.float64), 1) compute_market_agreement_nb(np.array([0.1, -0.05], dtype=np.float64), 2) compute_cascade_signal_nb(np.array([-0.05, -0.15], dtype=np.float64), 2, -0.10) print(f" JIT: {time.time()-t0c:.1f}s") from nautilus_dolphin.nautilus.esf_alpha_orchestrator import NDAlphaEngine from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker from mc.mc_ml import DolphinForewarner # ── Config ─────────────────────────────────────────────────────────────────────── VBT_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache") DATE_START = '2025-12-31' DATE_END = '2026-02-25' META_COLS = {'timestamp', 'scan_number', 'v50_lambda_max_velocity', 'v150_lambda_max_velocity', 'v300_lambda_max_velocity', 'v750_lambda_max_velocity', 'vel_div', 'instability_50', 'instability_150'} MC_MODELS_DIR = str(Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\nautilus_dolphin\mc_results\models")) BASE_FRACTION = 0.20 INITIAL_CAPITAL = 25000.0 MULT_SOFT_CEILING = 1.20 MULT_APEX_CEILING = 1.35 SHARPE_LOOKBACK = 20 EWMA_ALPHA = 0.18 # ~5-day half-life BASE_ENGINE_KWARGS = dict( initial_capital=INITIAL_CAPITAL, vel_div_threshold=-0.02, vel_div_extreme=-0.05, min_leverage=0.5, max_leverage=5.0, leverage_convexity=3.0, fraction=BASE_FRACTION, fixed_tp_pct=0.0095, stop_pct=1.0, max_hold_bars=120, use_direction_confirm=True, dc_lookback_bars=7, dc_min_magnitude_bps=0.75, dc_skip_contradicts=True, dc_leverage_boost=1.0, dc_leverage_reduce=0.5, use_asset_selection=True, min_irp_alignment=0.45, use_sp_fees=True, use_sp_slippage=True, sp_maker_entry_rate=0.62, sp_maker_exit_rate=0.50, use_ob_edge=True, ob_edge_bps=5.0, ob_confirm_rate=0.40, lookback=100, use_alpha_layers=True, use_dynamic_leverage=True, seed=42, ) MC_BASE_CFG = { 'trial_id': 0, 'vel_div_threshold': -0.020, 'vel_div_extreme': -0.050, 'use_direction_confirm': True, 'dc_lookback_bars': 7, 'dc_min_magnitude_bps': 0.75, 'dc_skip_contradicts': True, 'dc_leverage_boost': 1.00, 'dc_leverage_reduce': 0.50, 'vd_trend_lookback': 10, 'min_leverage': 0.50, 'max_leverage': 5.00, 'leverage_convexity': 3.00, 'fraction': BASE_FRACTION, 'use_alpha_layers': True, 'use_dynamic_leverage': True, 'fixed_tp_pct': 0.0095, 'stop_pct': 1.00, 'max_hold_bars': 120, 'use_sp_fees': True, 'use_sp_slippage': True, 'sp_maker_entry_rate': 0.62, 'sp_maker_exit_rate': 0.50, 'use_ob_edge': True, 'ob_edge_bps': 5.00, 'ob_confirm_rate': 0.40, 'ob_imbalance_bias': -0.09, 'ob_depth_scale': 1.00, 'use_asset_selection': True, 'min_irp_alignment': 0.45, 'lookback': 100, 'acb_beta_high': 0.80, 'acb_beta_low': 0.20, 'acb_w750_threshold_pct': 60, } OB_ASSETS = ["BTCUSDT", "ETHUSDT", "BNBUSDT", "SOLUSDT"] # ── Elastic ceiling formula ─────────────────────────────────────────────────────── def compute_sizing_mult(rolling_sharpe, acb_boost_float, mc_status, current_drawdown): """ Returns (fraction_mult, effective_ceiling). Base mult: piecewise-linear on rolling Sharpe. Elastic ceiling: soft=1.20, apex=1.35. Expands when ACB-boost + MC-GREEN + low drawdown align. Mirrors leverage doctrine (5x soft-cap -> 6x apex). """ if rolling_sharpe < 1.5: base_mult = 0.85 elif rolling_sharpe < 2.0: base_mult = 0.85 + (rolling_sharpe - 1.5) / 0.5 * 0.10 elif rolling_sharpe < 2.5: base_mult = 0.95 + (rolling_sharpe - 2.0) / 0.5 * 0.05 elif rolling_sharpe < 3.0: base_mult = 1.00 + (rolling_sharpe - 2.5) / 0.5 * 0.10 elif rolling_sharpe < 3.5: base_mult = 1.10 + (rolling_sharpe - 3.0) / 0.5 * 0.10 else: base_mult = 1.25 # Elastic ceiling expansion score [0,1] ceiling_headroom = MULT_APEX_CEILING - MULT_SOFT_CEILING # 0.15 score = 0.0 if acb_boost_float >= 1.55: score += 0.50 elif acb_boost_float >= 1.35: score += 0.25 if mc_status == 'GREEN': score += 0.30 elif mc_status == 'ORANGE': score -= 0.60 if current_drawdown < 0.03: score += 0.20 elif current_drawdown > 0.10: score -= 0.40 score = max(0.0, min(1.0, score)) effective_ceiling = MULT_SOFT_CEILING + score * ceiling_headroom # Hard safety overrides if mc_status == 'ORANGE': effective_ceiling = min(effective_ceiling, 1.10) if current_drawdown > 0.12: effective_ceiling = min(effective_ceiling, 1.00) effective_ceiling = min(effective_ceiling, MULT_APEX_CEILING) return min(base_mult, effective_ceiling), effective_ceiling # ── Shared setup ───────────────────────────────────────────────────────────────── print("\nLoading MC-Forewarner...") forewarner = DolphinForewarner(models_dir=MC_MODELS_DIR) parquet_files = sorted( p for p in VBT_DIR.glob("*.parquet") if 'catalog' not in str(p) and DATE_START <= p.stem <= DATE_END ) date_strings = [pf.stem for pf in parquet_files] print(f"Dates: {len(parquet_files)} ({date_strings[0]} to {date_strings[-1]})") acb = AdaptiveCircuitBreaker() acb.preload_w750(date_strings) all_vols = [] for pf in parquet_files[:2]: df = pd.read_parquet(pf) if 'BTCUSDT' not in df.columns: continue pr = df['BTCUSDT'].values for i in range(60, len(pr)): seg = pr[max(0, i-50):i] if len(seg) < 10: continue v = float(np.std(np.diff(seg)/seg[:-1])) if v > 0: all_vols.append(v) vol_p60 = float(np.percentile(all_vols, 60)) print(f"Vol p60: {vol_p60:.6f}") print(f"Pre-loading {len(parquet_files)} parquets...") pq_data = {} for pf in parquet_files: df = pd.read_parquet(pf) ac = [c for c in df.columns if c not in META_COLS] bp = df['BTCUSDT'].values if 'BTCUSDT' in df.columns else None dv = np.full(len(df), np.nan) if bp is not None: for i in range(50, len(bp)): seg = bp[max(0, i-50):i] if len(seg) < 10: continue dv[i] = float(np.std(np.diff(seg)/seg[:-1])) pq_data[pf.stem] = (df, ac, dv) print(f" Done") _mock_ob = MockOBProvider( imbalance_bias=-0.09, depth_scale=1.0, assets=OB_ASSETS, imbalance_biases={"BTCUSDT": -0.086, "ETHUSDT": -0.092, "BNBUSDT": +0.05, "SOLUSDT": +0.05}, ) ob_eng = OBFeatureEngine(_mock_ob) ob_eng.preload_date("mock", OB_ASSETS) # ════════════════════════════════════════════════════════════════════════════════ # Run: Adaptive Sharpe Monitor # ════════════════════════════════════════════════════════════════════════════════ print(f"\n{'='*70}") print(f" ADAPTIVE SHARPE MONITOR — 55-day run") print(f" Soft ceiling={MULT_SOFT_CEILING}x Apex ceiling={MULT_APEX_CEILING}x") print(f" Rolling window={SHARPE_LOOKBACK}d EWMA alpha={EWMA_ALPHA}") print(f"{'='*70}\n") engine = NDAlphaEngine(**BASE_ENGINE_KWARGS) engine.set_ob_engine(ob_eng) engine.set_acb(acb) engine.set_mc_forewarner(forewarner, MC_BASE_CFG) engine.set_esoteric_hazard_multiplier(0.0) daily_pnl = [] daily_log = [] ewma_mult = 1.0 peak_cap = INITIAL_CAPITAL t_run = time.time() for ds in date_strings: df, acols, dvol = pq_data[ds] vol_ok = np.where(np.isfinite(dvol), dvol > vol_p60, False) # Rolling Sharpe (annualised) from prior days if len(daily_pnl) >= 5: window = np.array(daily_pnl[-SHARPE_LOOKBACK:]) roll_sh = float(window.mean() / window.std() * np.sqrt(252)) if window.std() > 0 else 0.0 else: roll_sh = 0.0 cur_dd = (peak_cap - engine.capital) / peak_cap if peak_cap > 0 else 0.0 # ACB boost for this date (float) _acb_info = acb.get_boost_for_date(ds) if hasattr(acb, 'get_boost_for_date') else {} acb_boost_today = _acb_info.get('boost', 1.0) if isinstance(_acb_info, dict) else float(_acb_info) raw_mult, eff_ceiling = compute_sizing_mult(roll_sh, acb_boost_today, 'GREEN', cur_dd) # EWMA smoothing — prevents day-to-day whipsaw ewma_mult = EWMA_ALPHA * raw_mult + (1.0 - EWMA_ALPHA) * ewma_mult ewma_mult = max(0.80, min(ewma_mult, eff_ceiling)) # Apply to engine live engine.bet_sizer.base_fraction = BASE_FRACTION * ewma_mult r = engine.process_day(ds, df, acols, vol_regime_ok=vol_ok) pnl_today = r.get('pnl', 0.0) daily_pnl.append(pnl_today) if engine.capital > peak_cap: peak_cap = engine.capital daily_log.append({ 'date': ds, 'pnl': round(pnl_today, 2), 'capital': round(engine.capital, 2), 'trades': r.get('trades', 0), 'rolling_sharpe': round(roll_sh, 3), 'acb_boost': round(acb_boost_today, 4), 'raw_mult': round(raw_mult, 4), 'ewma_mult': round(ewma_mult, 4), 'effective_ceiling': round(eff_ceiling, 4), 'applied_fraction': round(BASE_FRACTION * ewma_mult, 4), 'drawdown_pct': round(cur_dd * 100, 2), }) print(f" {ds} PnL={pnl_today:+8.1f} Cap={engine.capital:10.0f} " f"RollSh={roll_sh:6.3f} ACBboost={acb_boost_today:.3f} " f"Mult(ewma)={ewma_mult:.4f} Ceil={eff_ceiling:.4f} " f"Frac={BASE_FRACTION*ewma_mult:.4f} DD={cur_dd*100:.1f}%") sys.stdout.flush() # ── Stats ───────────────────────────────────────────────────────────────────────── tr = engine.trade_history wins = [t for t in tr if t.pnl_absolute > 0] losses = [t for t in tr if t.pnl_absolute <= 0] gw = sum(t.pnl_absolute for t in wins) gl = abs(sum(t.pnl_absolute for t in losses)) roi = (engine.capital - INITIAL_CAPITAL) / INITIAL_CAPITAL * 100.0 pf = gw / gl if gl > 0 else 999.0 wr = len(wins) / len(tr) * 100.0 if tr else 0.0 pnls = np.array(daily_pnl) sharpe = float(pnls.mean() / pnls.std() * np.sqrt(252)) if pnls.std() > 0 else 0.0 pk = INITIAL_CAPITAL; max_dd = 0.0 running = INITIAL_CAPITAL for p in daily_pnl: running += p if running > pk: pk = running dd = (pk - running) / pk * 100.0 if dd > max_dd: max_dd = dd fracs = [r['applied_fraction'] for r in daily_log] ceilings = [r['effective_ceiling'] for r in daily_log] BASELINE = {'roi': 57.18, 'pf': 1.1487, 'dd': 14.94, 'sharpe': 2.490} print(f"\n{'='*70}") print(f" ADAPTIVE SHARPE MONITOR — RESULT") print(f"{'='*70}") print(f" ROI: {roi:+.2f}% (baseline 0.20-fixed: +{BASELINE['roi']:.2f}% delta={roi-BASELINE['roi']:+.2f}%)") print(f" PF: {pf:.4f} (baseline: {BASELINE['pf']:.4f} delta={pf-BASELINE['pf']:+.4f})") print(f" DD: {max_dd:.2f}% (baseline: {BASELINE['dd']:.2f}% delta={max_dd-BASELINE['dd']:+.2f}%)") print(f" Sharpe: {sharpe:.3f} (baseline: {BASELINE['sharpe']:.3f} delta={sharpe-BASELINE['sharpe']:+.3f})") print(f" WR: {wr:.1f}% Trades: {len(tr)}") print(f" Applied fraction: mean={np.mean(fracs):.4f} min={np.min(fracs):.4f} max={np.max(fracs):.4f}") print(f" Elastic ceiling: mean={np.mean(ceilings):.4f} max={np.max(ceilings):.4f} " f"days-at-apex={sum(1 for c in ceilings if c >= MULT_APEX_CEILING-0.001)}/{len(ceilings)}") print(f" Runtime: {time.time()-t_run:.0f}s") # ── Save ───────────────────────────────────────────────────────────────────────── ts = datetime.now().strftime('%Y%m%d_%H%M%S') run_dir = Path(__file__).parent / 'run_logs' run_dir.mkdir(exist_ok=True) with open(run_dir / f'sharpe_adaptive_{ts}.csv', 'w', newline='') as f: w = csv.DictWriter(f, fieldnames=list(daily_log[0].keys())) w.writeheader(); w.writerows(daily_log) PART1_RESULTS = [ {'fraction':0.16,'roi':44.90,'pf':1.1559,'dd':11.86,'sharpe':2.617,'wr':49.6,'n_trades':2138}, {'fraction':0.18,'roi':51.02,'pf':1.1524,'dd':13.39,'sharpe':2.554,'wr':49.6,'n_trades':2138}, {'fraction':0.20,'roi':57.18,'pf':1.1487,'dd':14.94,'sharpe':2.490,'wr':49.6,'n_trades':2138}, {'fraction':0.22,'roi':63.38,'pf':1.1450,'dd':16.50,'sharpe':2.426,'wr':49.6,'n_trades':2138}, {'fraction':0.24,'roi':69.59,'pf':1.1413,'dd':18.07,'sharpe':2.361,'wr':49.6,'n_trades':2138}, {'fraction':0.26,'roi':75.79,'pf':1.1376,'dd':19.65,'sharpe':2.295,'wr':49.6,'n_trades':2138}, {'fraction':0.28,'roi':81.96,'pf':1.1338,'dd':21.24,'sharpe':2.230,'wr':49.6,'n_trades':2138}, {'fraction':0.30,'roi':88.08,'pf':1.1301,'dd':22.84,'sharpe':2.165,'wr':49.6,'n_trades':2138}, ] summary = { 'experiment': 'fraction_sweep_and_sharpe_adaptive_55day', 'date_range': f'{DATE_START}_to_{DATE_END}', 'base_fraction': BASE_FRACTION, 'fixed_tp_pct': 0.0095, 'kelly_anchor': {'mean_pnl_pct': 0.051, 'sigma_pct': 0.908, 'full_kelly_frac': 0.062, 'half_kelly_frac': 0.031}, 'elastic_ceiling': { 'soft_ceiling': MULT_SOFT_CEILING, 'apex_ceiling': MULT_APEX_CEILING, 'sharpe_lookback_days': SHARPE_LOOKBACK, 'ewma_alpha': EWMA_ALPHA, }, 'part1_static_sweep': PART1_RESULTS, 'part1_observation': ( 'ROI scales linearly with fraction (+6.2% per +0.02). ' 'BUT Sharpe and PF DECREASE with higher fraction. ' 'Higher static fraction = more capital at risk per same signal quality. ' 'Adaptive goal: capture ROI of high-fraction on strong Sharpe days ' 'while reducing on weak Sharpe days to protect DD.' ), 'part2_adaptive': { 'roi': roi, 'pf': pf, 'dd': max_dd, 'sharpe': sharpe, 'wr': wr, 'n_trades': len(tr), 'delta_roi_vs_baseline': roi - BASELINE['roi'], 'delta_sharpe_vs_baseline': sharpe - BASELINE['sharpe'], 'delta_dd_vs_baseline': max_dd - BASELINE['dd'], 'fraction_mean': float(np.mean(fracs)), 'fraction_min': float(np.min(fracs)), 'fraction_max': float(np.max(fracs)), 'ceiling_mean': float(np.mean(ceilings)), 'ceiling_max': float(np.max(ceilings)), 'days_at_apex_ceiling': int(sum(1 for c in ceilings if c >= MULT_APEX_CEILING - 0.001)), }, 'run_ts': ts, } with open(run_dir / f'fraction_sharpe_{ts}.json', 'w') as f: json.dump(summary, f, indent=2) print(f"\nSaved:") print(f" run_logs/sharpe_adaptive_{ts}.csv") print(f" run_logs/fraction_sharpe_{ts}.json")