""" exp15_stop_gate.py — z[13]-gated per-trade stop tightening AND TP extension. Tests whether per-trade exit overrides based on daily z[13] (proxy_B dim from v2 model) can improve the D_LIQ_GOLD baseline. Families: A — Stop tightening only (high z13 → tight stop) [12 configs] B — TP extension only (low z13 → higher TP) [20 configs] C — Hold extension only (low z13 → more bars) [12 configs] D — TP + Hold combined (low z13 → both) [12 configs] E — Asymmetric bidirectional (HIGH→tight stop, LOW→higher TP) [6 configs] Baseline: D_LIQ_GOLD (soft=8x, hard=9x, mc_ref=5x, margin_buffer=0.95) Usage: cd nautilus_dolphin/ python dvae/exp15_stop_gate.py --subset 14 --top_k 20 # Phase 1 (14-day screening) python dvae/exp15_stop_gate.py --subset 0 --top_k 0 # Phase 2 (full 56 days) """ import sys, os, time, json, warnings, argparse import io sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace', line_buffering=True) warnings.filterwarnings('ignore') import numpy as np import pandas as pd from pathlib import Path ROOT = Path(__file__).resolve().parent.parent.parent ND_ROOT = ROOT / 'nautilus_dolphin' sys.path.insert(0, str(ND_ROOT)) from dvae.convnext_sensor import ConvNextSensor from nautilus_dolphin.nautilus.proxy_boost_engine import ( LiquidationGuardEngine, D_LIQ_SOFT_CAP, D_LIQ_ABS_CAP, D_LIQ_MC_REF, D_LIQ_MARGIN_BUF, create_d_liq_engine, ) from nautilus_dolphin.nautilus.ob_features import ( OBFeatureEngine, compute_imbalance_nb, compute_depth_1pct_nb, compute_depth_quality_nb, compute_fill_probability_nb, compute_spread_proxy_nb, compute_depth_asymmetry_nb, compute_imbalance_persistence_nb, compute_withdrawal_velocity_nb, compute_market_agreement_nb, compute_cascade_signal_nb, ) from nautilus_dolphin.nautilus.ob_provider import MockOBProvider from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker from nautilus_dolphin.nautilus.alpha_asset_selector import compute_irp_nb, compute_ars_nb, rank_assets_irp_nb from nautilus_dolphin.nautilus.alpha_bet_sizer import compute_sizing_nb from nautilus_dolphin.nautilus.alpha_signal_generator import check_dc_nb from mc.mc_ml import DolphinForewarner # ── JIT warmup ──────────────────────────────────────────────────────────────── print("Warming up JIT...") _p = np.array([1., 2., 3.], dtype=np.float64) compute_irp_nb(_p, -1); compute_ars_nb(1., .5, .01) rank_assets_irp_nb(np.ones((10, 2), dtype=np.float64), 8, -1, 5, 500., 20, 0.20) compute_sizing_nb(-.03, -.02, -.05, 3., .5, 5., .20, True, True, 0., np.zeros(4, dtype=np.int64), np.zeros(4, dtype=np.int64), np.zeros(5, dtype=np.float64), 0, -1, .01, .04) check_dc_nb(_p, 3, 1, .75) _b = np.array([100., 200., 300., 400., 500.], dtype=np.float64) _a = np.array([110., 190., 310., 390., 510.], dtype=np.float64) compute_imbalance_nb(_b, _a); compute_depth_1pct_nb(_b, _a) compute_depth_quality_nb(210., 200.); compute_fill_probability_nb(1.) compute_spread_proxy_nb(_b, _a); compute_depth_asymmetry_nb(_b, _a) compute_imbalance_persistence_nb(np.array([.1, -.1], dtype=np.float64), 2) compute_withdrawal_velocity_nb(np.array([100., 110.], dtype=np.float64), 1) compute_market_agreement_nb(np.array([.1, -.05], dtype=np.float64), 2) compute_cascade_signal_nb(np.array([-.05, -.15], dtype=np.float64), 2, -.10) print(" JIT ready.") MODEL_V2 = ND_ROOT / 'dvae' / 'convnext_model_v2.json' SCANS_DIR = ROOT / 'vbt_cache' KLINES_DIR = ROOT / 'vbt_cache_klines' MC_MODELS = str(ROOT / 'nautilus_dolphin' / 'mc_results' / 'models') OUT_FILE = ROOT / 'exp15_results.json' META_COLS = { 'timestamp', 'scan_number', 'v50_lambda_max_velocity', 'v150_lambda_max_velocity', 'v300_lambda_max_velocity', 'v750_lambda_max_velocity', 'vel_div', 'instability_50', 'instability_150', } FEATURE_COLS = [ 'v50_lambda_max_velocity','v150_lambda_max_velocity', 'v300_lambda_max_velocity','v750_lambda_max_velocity', 'vel_div','instability_50','instability_150', ] BASE_ENGINE_KWARGS = dict( initial_capital=25000., vel_div_threshold=-.02, vel_div_extreme=-.05, min_leverage=.5, max_leverage=5., leverage_convexity=3., fraction=.20, fixed_tp_pct=.0099, stop_pct=1., max_hold_bars=120, use_direction_confirm=True, dc_lookback_bars=7, dc_min_magnitude_bps=.75, dc_skip_contradicts=True, dc_leverage_boost=1., dc_leverage_reduce=.5, use_asset_selection=True, min_irp_alignment=.45, use_sp_fees=True, use_sp_slippage=True, sp_maker_entry_rate=.62, sp_maker_exit_rate=.50, use_ob_edge=True, ob_edge_bps=5., ob_confirm_rate=.40, lookback=100, use_alpha_layers=True, use_dynamic_leverage=True, seed=42, ) D_LIQ_KWARGS = dict( extended_soft_cap=D_LIQ_SOFT_CAP, extended_abs_cap=D_LIQ_ABS_CAP, mc_leverage_ref=D_LIQ_MC_REF, margin_buffer=D_LIQ_MARGIN_BUF, threshold=.35, alpha=1., adaptive_beta=True, ) MC_BASE_CFG = { 'trial_id': 0, 'vel_div_threshold': -.020, 'vel_div_extreme': -.050, 'use_direction_confirm': True, 'dc_lookback_bars': 7, 'dc_min_magnitude_bps': .75, 'dc_skip_contradicts': True, 'dc_leverage_boost': 1.00, 'dc_leverage_reduce': .50, 'vd_trend_lookback': 10, 'min_leverage': .50, 'max_leverage': 5.00, 'leverage_convexity': 3.00, 'fraction': .20, 'use_alpha_layers': True, 'use_dynamic_leverage': True, 'fixed_tp_pct': .0099, 'stop_pct': 1.00, 'max_hold_bars': 120, 'use_sp_fees': True, 'use_sp_slippage': True, 'sp_maker_entry_rate': .62, 'sp_maker_exit_rate': .50, 'use_ob_edge': True, 'ob_edge_bps': 5.00, 'ob_confirm_rate': .40, 'ob_imbalance_bias': -.09, 'ob_depth_scale': 1.00, 'use_asset_selection': True, 'min_irp_alignment': .45, 'lookback': 100, 'acb_beta_high': .80, 'acb_beta_low': .20, 'acb_w750_threshold_pct': 60, } T_WIN = 32 PROXY_B_DIM = 13 # z[13] = proxy_B dim for v2 ep=13 (r=+0.933) # ── ZExitGateEngine ─────────────────────────────────────────────────────────── class ZExitGateEngine(LiquidationGuardEngine): """ Per-trade TP extension (low z13) and/or stop tightening (high z13). Uses z[13] (proxy_B dim from v2 model) as a day-level regime signal: HIGH z13 (> high_thr) = high adversity → tight stop (defense) LOW z13 (< low_thr) = calm/trending → higher TP + extended hold (offense) MID z13 = no override (baseline exit logic) The _try_entry() override ensures overrides apply to EVERY entry on that day, not just the first (which is what _pending_* would do if set only once). """ def __init__(self, *args, # Stop tightening (high adversity) high_thr: float = 99.0, # z13 > this → tight stop tight_stop_pct: float = 0.005, # TP extension (calm/trending) low_thr: float = -99.0, # z13 < this → higher TP wide_tp_pct: float = None, # None = no TP override extended_hold: int = None, # None = no hold override **kwargs): super().__init__(*args, **kwargs) self.high_thr = high_thr self.tight_stop_pct = tight_stop_pct self.low_thr = low_thr self.wide_tp_pct = wide_tp_pct self.extended_hold = extended_hold self._z13_today = 0.0 self._n_stop_triggered = 0 self._n_tp_triggered = 0 self._n_hold_triggered = 0 def set_day_z13(self, z13: float): self._z13_today = z13 def _try_entry(self, *args, **kwargs): z = self._z13_today # Set overrides fresh before EVERY entry (not just the first) if z > self.high_thr: self._pending_stop_override = self.tight_stop_pct self._pending_tp_override = None self._pending_max_hold_override = None self._n_stop_triggered += 1 elif z < self.low_thr: self._pending_stop_override = None self._pending_tp_override = self.wide_tp_pct self._pending_max_hold_override = self.extended_hold self._n_tp_triggered += 1 if self.extended_hold: self._n_hold_triggered += 1 else: self._pending_stop_override = None self._pending_tp_override = None self._pending_max_hold_override = None return super()._try_entry(*args, **kwargs) def get_trigger_counts(self): return { 'n_stop_triggered': self._n_stop_triggered, 'n_tp_triggered': self._n_tp_triggered, 'n_hold_triggered': self._n_hold_triggered, } # ── Config generation ───────────────────────────────────────────────────────── def generate_configs(): """Generate all 62 configs for exp15.""" configs = [] # FAMILY A — Stop tightening only [12 configs] high_thrs = [0.5, 0.8, 1.0, 1.2] tight_stops = [0.003, 0.005, 0.010] for high_thr in high_thrs: for tight_stop in tight_stops: name = f'A_ht{high_thr}_stop{tight_stop}' configs.append({ 'name': name, 'family': 'A', 'high_thr': high_thr, 'tight_stop_pct': tight_stop, 'low_thr': -99.0, 'wide_tp_pct': None, 'extended_hold': None, }) # FAMILY B — TP extension only [20 configs] low_thrs = [-99.0, 0.3, 0.0, -0.3, -0.5] wide_tps = [0.0110, 0.0120, 0.0130, 0.0150] for low_thr in low_thrs: for wide_tp in wide_tps: name = f'B_lt{low_thr}_tp{wide_tp:.4f}' configs.append({ 'name': name, 'family': 'B', 'high_thr': 99.0, 'tight_stop_pct': 0.005, 'low_thr': low_thr, 'wide_tp_pct': wide_tp, 'extended_hold': None, }) # FAMILY C — Hold extension only [12 configs] low_thrs = [-99.0, 0.3, 0.0, -0.3] extended_holds = [150, 180, 240] for low_thr in low_thrs: for hold in extended_holds: name = f'C_lt{low_thr}_hold{hold}' configs.append({ 'name': name, 'family': 'C', 'high_thr': 99.0, 'tight_stop_pct': 0.005, 'low_thr': low_thr, 'wide_tp_pct': None, 'extended_hold': hold, }) # FAMILY D — TP + Hold combined [12 configs] combos = [ (-99.0, 0.0120, 150), (-99.0, 0.0130, 150), (-99.0, 0.0150, 180), (-99.0, 0.0120, 180), (-99.0, 0.0130, 180), (-99.0, 0.0150, 240), (0.3, 0.0120, 150), (0.3, 0.0130, 150), (0.3, 0.0150, 180), (0.3, 0.0120, 180), (0.3, 0.0130, 180), (0.3, 0.0150, 240), ] for low_thr, wide_tp, hold in combos: name = f'D_lt{low_thr}_tp{wide_tp:.4f}_hold{hold}' configs.append({ 'name': name, 'family': 'D', 'high_thr': 99.0, 'tight_stop_pct': 0.005, 'low_thr': low_thr, 'wide_tp_pct': wide_tp, 'extended_hold': hold, }) # FAMILY E — Asymmetric bidirectional [6 configs] combos = [ (1.0, 0.005, 0.0, 0.0120, None), (1.0, 0.005, 0.0, 0.0130, None), (1.0, 0.005, -0.3, 0.0120, None), (1.0, 0.005, -0.3, 0.0130, None), (1.0, 0.005, 0.0, 0.0120, 150), (1.0, 0.005, -0.3, 0.0130, 150), ] for high_thr, tight_stop, low_thr, wide_tp, hold in combos: name = f'E_ht{high_thr}_stop{tight_stop}_lt{low_thr}_tp{wide_tp:.4f}' if hold: name += f'_hold{hold}' configs.append({ 'name': name, 'family': 'E', 'high_thr': high_thr, 'tight_stop_pct': tight_stop, 'low_thr': low_thr, 'wide_tp_pct': wide_tp, 'extended_hold': hold, }) return configs # ── Data helpers (process_day pattern — same as exp14) ──────────────────────── def _load_pq_data(parquet_files): """Load all 5s parquet files into pq_data dict (date_str → (df, acols, dvol)).""" print("Loading 5s parquet data...") pq_data = {} for pf in parquet_files: pf = Path(pf) df = pd.read_parquet(pf) ac = [c for c in df.columns if c not in META_COLS] bp = df['BTCUSDT'].values if 'BTCUSDT' in df.columns else None dv = np.full(len(df), np.nan) if bp is not None: for i in range(50, len(bp)): seg = bp[max(0, i - 50):i] if len(seg) >= 10: dv[i] = float(np.std(np.diff(seg) / seg[:-1])) pq_data[pf.stem] = (df, ac, dv) print(f" Loaded {len(pq_data)} days") return pq_data def _make_ob_acb(parquet_files_paths, pq_data: dict): """Create fresh OBFeatureEngine + ACB + Forewarner combo for one run.""" pf_list = [Path(p) for p in parquet_files_paths] OB_ASSETS = sorted({a for ds, (_, ac, _) in pq_data.items() for a in ac}) if not OB_ASSETS: OB_ASSETS = ['BTCUSDT', 'ETHUSDT', 'BNBUSDT', 'SOLUSDT'] mock_ob = MockOBProvider( imbalance_bias=-.09, depth_scale=1., assets=OB_ASSETS, imbalance_biases={ "BTCUSDT": -.086, "ETHUSDT": -.092, "BNBUSDT": +.05, "SOLUSDT": +.05, }, ) ob_eng = OBFeatureEngine(mock_ob) ob_eng.preload_date("mock", OB_ASSETS) forewarner = DolphinForewarner(models_dir=MC_MODELS) acb = AdaptiveCircuitBreaker() acb.preload_w750([pf.stem for pf in pf_list]) return ob_eng, acb, forewarner def _compute_metrics(engine, elapsed): """Extract ROI/DD/Calmar/T from a finished engine.""" trades = engine.trade_history roi = (engine.capital - 25000.) / 25000. * 100. cap_curve = [25000.] for t_ in sorted(trades, key=lambda x: getattr(x, 'exit_bar', 0)): cap_curve.append(cap_curve[-1] + getattr(t_, 'pnl_absolute', 0.)) cap_arr = np.array(cap_curve) peak = np.maximum.accumulate(cap_arr) dd = float(np.max((peak - cap_arr) / (peak + 1e-10)) * 100.) calmar = roi / max(dd, 1e-4) sh = getattr(engine, '_scale_history', []) return { 'T': len(trades), 'roi': round(roi, 4), 'dd': round(dd, 4), 'calmar': round(calmar, 4), 'elapsed_s': round(elapsed, 1), 'scale_mean': round(float(np.mean(sh)), 4) if sh else 1.0, } def precompute_z13_per_day(parquet_files_1m, sensor): """ Compute daily mean z[13] from 1m klines files. Returns dict: date_str → float (mean z[13] over T_WIN windows in that day) """ print("Precomputing daily z[13] from 1m klines...") z13_by_date = {} for f in parquet_files_1m: date_str = Path(f).stem[:10] try: df = pd.read_parquet(f, columns=FEATURE_COLS).dropna() if len(df) < T_WIN + 5: continue z13_vals = [] for start in range(0, len(df) - T_WIN, T_WIN // 2): try: z_mu, _ = sensor.encode_window(df, start + T_WIN) z13_vals.append(float(z_mu[PROXY_B_DIM])) except Exception: pass if z13_vals: z13_by_date[date_str] = float(np.mean(z13_vals)) except Exception: pass print(f" {len(z13_by_date)} days with z[13]") return z13_by_date # ── Single config runner ─────────────────────────────────────────────────────── def run_one(cfg: dict, z13_by_date: dict, pq_data: dict, parquet_files: list, vol_p60: float, subset_days: int = 0) -> dict: """Run ZExitGateEngine for one config using process_day API.""" files = [Path(f) for f in parquet_files] if subset_days > 0: files = files[:subset_days] ob_eng, acb, forewarner = _make_ob_acb([str(f) for f in files], pq_data) engine = ZExitGateEngine( **BASE_ENGINE_KWARGS, **D_LIQ_KWARGS, high_thr = cfg['high_thr'], tight_stop_pct = cfg['tight_stop_pct'], low_thr = cfg['low_thr'], wide_tp_pct = cfg['wide_tp_pct'], extended_hold = cfg['extended_hold'], ) engine.set_ob_engine(ob_eng) engine.set_acb(acb) engine.set_mc_forewarner(forewarner, MC_BASE_CFG) engine.set_esoteric_hazard_multiplier(0.) t0 = time.time() for pf in files: ds = pf.stem if ds not in pq_data: continue df, acols, dvol = pq_data[ds] vol_ok = np.where(np.isfinite(dvol), dvol > vol_p60, False) engine.set_day_z13(z13_by_date.get(ds, 0.0)) engine.process_day(ds, df, acols, vol_regime_ok=vol_ok) result = _compute_metrics(engine, time.time() - t0) result.update(engine.get_trigger_counts()) return result def run_baseline(pq_data: dict, parquet_files: list, vol_p60: float, subset_days: int = 0) -> dict: """Run D_LIQ_GOLD baseline (no override) on pre-loaded pq_data.""" files = [Path(f) for f in parquet_files] if subset_days > 0: files = files[:subset_days] ob_eng, acb, forewarner = _make_ob_acb([str(f) for f in files], pq_data) engine = create_d_liq_engine(**BASE_ENGINE_KWARGS) engine.set_ob_engine(ob_eng) engine.set_acb(acb) engine.set_mc_forewarner(forewarner, MC_BASE_CFG) engine.set_esoteric_hazard_multiplier(0.) t0 = time.time() for pf in files: ds = pf.stem if ds not in pq_data: continue df, acols, dvol = pq_data[ds] vol_ok = np.where(np.isfinite(dvol), dvol > vol_p60, False) engine.process_day(ds, df, acols, vol_regime_ok=vol_ok) return _compute_metrics(engine, time.time() - t0) # ── Main ───────────────────────────────────────────────────────────────────── def main(): parser = argparse.ArgumentParser() parser.add_argument('--subset', type=int, default=14, help='Days for Phase 1 (0=all)') parser.add_argument('--top_k', type=int, default=20, help='Top configs for Phase 2') args = parser.parse_args() print("=" * 80) print("exp15 — z[13]-Gated Exit Manager: Stop Tightening AND TP Extension") print("=" * 80) # ── Load sensor ────────────────────────────────────────────────────────── print(f"\nLoading v2 model from {MODEL_V2}...") assert MODEL_V2.exists(), f"Model not found: {MODEL_V2}" sensor = ConvNextSensor(str(MODEL_V2)) print(f" Loaded: epoch={sensor.epoch} val_loss={sensor.val_loss:.4f} z_dim={sensor.z_dim}") # ── Load data files ─────────────────────────────────────────────────────── print("\nLoading data files...") scans_5s = sorted(Path(SCANS_DIR).glob('*.parquet')) klines_1m = sorted(Path(KLINES_DIR).glob('*.parquet')) scans_5s = [f for f in scans_5s if '2025-12-31' <= f.stem[:10] <= '2026-02-25'] klines_1m = [f for f in klines_1m if '2025-12-31' <= f.stem[:10] <= '2026-02-25'] print(f" 5s scans: {len(scans_5s)} 1m klines: {len(klines_1m)}") # ── Pre-load pq_data (once, reused for every run) ───────────────────────── print("\nPre-loading 5s parquet data (done once for all runs)...") pq_data_full = _load_pq_data([str(f) for f in scans_5s]) all_vols = [] for _, (_, _, dv) in pq_data_full.items(): all_vols.extend(dv[np.isfinite(dv)].tolist()) vol_p60 = float(np.percentile(all_vols, 60)) if all_vols else 0.0 print(f" vol_p60={vol_p60:.6f}") # ── Precompute z[13] per day ────────────────────────────────────────────── z13_by_date = precompute_z13_per_day([str(f) for f in klines_1m], sensor) # ── Generate configs ────────────────────────────────────────────────────── configs = generate_configs() print(f"\nTotal configs: {len(configs)}") for family in ['A', 'B', 'C', 'D', 'E']: n = len([c for c in configs if c['family'] == family]) print(f" Family {family}: {n} configs") # ── Baseline ────────────────────────────────────────────────────────────── print("\nRunning BASELINE (D_LIQ_GOLD)...") t0 = time.time() baseline = run_baseline(pq_data_full, [str(f) for f in scans_5s], vol_p60, args.subset) bROI = baseline.get('roi', 0.0) bDD = baseline.get('dd', 0.0) bCal = baseline.get('calmar', 0.0) bT = baseline.get('T', 0) print(f" Baseline: T={bT} ROI={bROI:.2f}% DD={bDD:.2f}% Calmar={bCal:.2f} ({time.time()-t0:.0f}s)") # ── Phase 1: screening ──────────────────────────────────────────────────── print(f"\n{'='*65}") print(f"Phase 1 — screening {len(configs)} configs on {args.subset or 56}-day window") print(f"{'='*65}") results = [] for i, cfg in enumerate(configs): t0 = time.time() res = run_one(cfg, z13_by_date, pq_data_full, [str(f) for f in scans_5s], vol_p60, args.subset) roi = res.get('roi', 0.0) dd = res.get('dd', 0.0) cal = res.get('calmar', 0.0) T = res.get('T', 0) n_stop = res.get('n_stop_triggered', 0) n_tp = res.get('n_tp_triggered', 0) n_hold = res.get('n_hold_triggered', 0) dROI = roi - bROI dDD = dd - bDD dCal = cal - bCal elapsed = time.time() - t0 print(f"[{i+1:3d}/{len(configs)}] {cfg['name']}") print(f" T={T} ROI={roi:.2f}% DD={dd:.2f}% Calmar={cal:.2f} " f"dROI={dROI:+.2f}pp dDD={dDD:+.2f}pp dCal={dCal:+.2f} " f"stop={n_stop} tp={n_tp} hold={n_hold} ({elapsed:.0f}s)") results.append({**cfg, 'roi': roi, 'dd': dd, 'calmar': cal, 'trades': T, 'dROI': dROI, 'dDD': dDD, 'dCal': dCal, 'n_stop_triggered': n_stop, 'n_tp_triggered': n_tp, 'n_hold_triggered': n_hold}) results.sort(key=lambda x: x['dROI'], reverse=True) print(f"\nPhase 1 Top 10:") for r in results[:10]: print(f" dROI={r['dROI']:+.2f}pp ROI={r['roi']:.2f}% " f"Cal={r['calmar']:.2f} stop={r['n_stop_triggered']} {r['name']}") # ── Phase 2: full validation ────────────────────────────────────────────── p2_results = [] if args.top_k > 0 and args.subset > 0: top_cfgs = [c for c in results[:args.top_k]] print(f"\n{'='*65}") print(f"Phase 2 — validating top {len(top_cfgs)} configs on FULL 56 days") print(f"{'='*65}") print("\nRunning baseline (full 56 days)...") t0 = time.time() base_full = run_baseline(pq_data_full, [str(f) for f in scans_5s], vol_p60, 0) bROI_f = base_full.get('roi', 0.0) bDD_f = base_full.get('dd', 0.0) bCal_f = base_full.get('calmar', 0.0) bT_f = base_full.get('T', 0) print(f" Baseline full: T={bT_f} ROI={bROI_f:.2f}% DD={bDD_f:.2f}% " f"Calmar={bCal_f:.2f} ({time.time()-t0:.0f}s)") for i, cfg in enumerate(top_cfgs): t0 = time.time() res = run_one(cfg, z13_by_date, pq_data_full, [str(f) for f in scans_5s], vol_p60, 0) roi = res.get('roi', 0.0) dd = res.get('dd', 0.0) cal = res.get('calmar', 0.0) T = res.get('T', 0) n_stop = res.get('n_stop_triggered', 0) n_tp = res.get('n_tp_triggered', 0) dROI = roi - bROI_f dDD = dd - bDD_f dCal = cal - bCal_f print(f"[P2 {i+1:2d}/{len(top_cfgs)}] {cfg['name']}") print(f" T={T} ROI={roi:.2f}% DD={dd:.2f}% Calmar={cal:.2f} " f"dROI={dROI:+.2f}pp dDD={dDD:+.2f}pp dCal={dCal:+.2f} " f"stop={n_stop} tp={n_tp} ({time.time()-t0:.0f}s)") p2_results.append({**cfg, 'roi': roi, 'dd': dd, 'calmar': cal, 'trades': T, 'dROI': dROI, 'dDD': dDD, 'dCal': dCal, 'n_stop_triggered': n_stop, 'n_tp_triggered': n_tp}) # ── Save results ────────────────────────────────────────────────────────── output = { 'baseline_p1': baseline, 'p1_results': results, 'p2_results': p2_results, 'phase': '1+2' if p2_results else '1', 'n_configs': len(configs), } with open(OUT_FILE, 'w') as f: json.dump(output, f, indent=2, default=str) print(f"\nResults saved to {OUT_FILE}") if p2_results: p2_sorted = sorted(p2_results, key=lambda x: x['dROI'], reverse=True) print(f"\nPhase 2 Top 5 by ROI delta:") for r in p2_sorted[:5]: print(f" dROI={r['dROI']:+.2f}pp DD={r['dd']:.2f}% Cal={r['calmar']:.2f} " f"stop={r['n_stop_triggered']} {r['name']}") print("\n[DONE]") if __name__ == '__main__': main()