DOLPHIN/nautilus_dolphin/test_pf_mtf_5s_1m.py

"""MTF 5s + 1m Combined Experiment — Orthogonal Leverage Modulation.
Hypothesis: 1m vel_div (klines-derived, orthogonal to 5s) acts as an INDEPENDENT
second-order leverage modulator. When the 1m signal also fires bearishly on the
same day as the 5s system, it's a genuinely rare joint event with higher conviction.
Architecture (Iron Rule preserved — 1m never touches entry gating):
- Baseline: champion 5s engine on 55-day window (vbt_cache, Dec31-Feb25)
- MTF: same engine + MTFBoostACB wrapper that multiplies ACB boost by
a 1m-alignment factor derived from vbt_cache_klines for same dates
MTFBoostACB mechanism:
- Compute per-day: align_frac = fraction of 1m bars with vel_div < VD_1M_THRESHOLD
(VD_1M_THRESHOLD = -0.50, the klines p~7 threshold, matching signal selectivity)
- MTF multiplier: 1.0 + MTF_MAX_BOOST * min(align_frac / ALIGN_NORM, 1.0)
MTF_MAX_BOOST = 0.15 (up to +15% leverage boost)
ALIGN_NORM = 0.15 (15% of 1m bars bearish = max boost day)
- Final ACB boost (MTF) = ACB_boost_base * mtf_mult
- Result feeds into engine's 3-scale formula: regime_size_mult = ACB_boost * (1 + beta * strength^3)
- abs_max_leverage = 6.0 ceiling unchanged — MTF can only push within it
Expected (from prior analysis):
- Cross-corr r<0.013 at all lags → signals are orthogonal alpha sources
- MTF boost should add +3-5% ROI, WR lift 49%→52-54%
- Joint firing is genuinely rare (both p~7) → selective, not noise
Timescale comparison:
- 5s bars: 1 bar per 5s → 17,280 bars/day, typical entries ~40/day
- 1m bars: 1 bar per 1min → 1,440 bars/day, used as DAILY aggregate only
Saved outputs:
- run_logs/mtf_5s_1m_{TS}.json (summary: baseline vs MTF)
- run_logs/mtf_daily_baseline_{TS}.csv (per-day stats, baseline)
- run_logs/mtf_daily_mtf_{TS}.csv (per-day stats, MTF)
- run_logs/mtf_alignment_{TS}.csv (per-day 1m alignment + mtf_mult)
"""
import sys, time, math, json, csv
sys.stdout.reconfigure(encoding='utf-8', errors='replace')
from pathlib import Path
from datetime import datetime
import numpy as np
import pandas as pd
sys.path.insert(0, str(Path(__file__).parent))
# ── JIT warmup ─────────────────────────────────────────────────────────────────
print("Compiling numba kernels...")
t0c = time.time()
from nautilus_dolphin.nautilus.alpha_asset_selector import compute_irp_nb, compute_ars_nb, rank_assets_irp_nb
from nautilus_dolphin.nautilus.alpha_bet_sizer import compute_sizing_nb
from nautilus_dolphin.nautilus.alpha_signal_generator import check_dc_nb
from nautilus_dolphin.nautilus.ob_features import (
OBFeatureEngine, compute_imbalance_nb, compute_depth_1pct_nb,
compute_depth_quality_nb, compute_fill_probability_nb, compute_spread_proxy_nb,
compute_depth_asymmetry_nb, compute_imbalance_persistence_nb,
compute_withdrawal_velocity_nb, compute_market_agreement_nb, compute_cascade_signal_nb,
)
from nautilus_dolphin.nautilus.ob_provider import MockOBProvider
_p = np.array([1.0, 2.0, 3.0], dtype=np.float64)
compute_irp_nb(_p, -1); compute_ars_nb(1.0, 0.5, 0.01)
rank_assets_irp_nb(np.ones((10, 2), dtype=np.float64), 8, -1, 5, 500.0, 20, 0.20)
compute_sizing_nb(-0.03, -0.02, -0.05, 3.0, 0.5, 5.0, 0.20, True, True, 0.0,
np.zeros(4, dtype=np.int64), np.zeros(4, dtype=np.int64),
np.zeros(5, dtype=np.float64), 0, -1, 0.01, 0.04)
check_dc_nb(_p, 3, 1, 0.75)
_b = np.array([100.0, 200.0, 300.0, 400.0, 500.0], dtype=np.float64)
_a = np.array([110.0, 190.0, 310.0, 390.0, 510.0], dtype=np.float64)
compute_imbalance_nb(_b, _a); compute_depth_1pct_nb(_b, _a)
compute_depth_quality_nb(210.0, 200.0); compute_fill_probability_nb(1.0)
compute_spread_proxy_nb(_b, _a); compute_depth_asymmetry_nb(_b, _a)
compute_imbalance_persistence_nb(np.array([0.1, -0.1], dtype=np.float64), 2)
compute_withdrawal_velocity_nb(np.array([100.0, 110.0], dtype=np.float64), 1)
compute_market_agreement_nb(np.array([0.1, -0.05], dtype=np.float64), 2)
compute_cascade_signal_nb(np.array([-0.05, -0.15], dtype=np.float64), 2, -0.10)
print(f" JIT: {time.time() - t0c:.1f}s")
from nautilus_dolphin.nautilus.esf_alpha_orchestrator import NDAlphaEngine
from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker, ACBConfig
from mc.mc_ml import DolphinForewarner
# ── Paths ───────────────────────────────────────────────────────────────────────
VBT_5S_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache")
VBT_1M_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache_klines")
DATE_START = '2025-12-31'
DATE_END = '2026-02-25' # 55-day champion window
# ── MTF parameters ─────────────────────────────────────────────────────────────
VD_1M_THRESHOLD = -0.50 # klines p~7 (matches 5s p~7 selectivity)
MTF_MAX_BOOST = 0.15 # max additional boost fraction (+15%)
ALIGN_NORM = 0.15 # 15% of 1m bars bearish = max-boost day
# typical: ~7% → +7% boost; extreme: 15%+ → +15%
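# Worked example of the boost formula (illustrative sanity check; the align
# values below are hypothetical, not measured):
#   align=0.07 → mult = 1 + 0.15*min(0.07/0.15, 1) = 1.07  (+7% leverage)
#   align=0.20 → mult = 1 + 0.15*1.0               = 1.15  (capped at +15%)
# With a hypothetical base ACB boost of 1.20 on a max-boost day, the final
# boost is 1.20 * 1.15 = 1.38, still subject to the abs_max_leverage ceiling.
assert math.isclose(1.0 + MTF_MAX_BOOST * min(0.07 / ALIGN_NORM, 1.0), 1.07)
assert math.isclose(1.0 + MTF_MAX_BOOST * min(0.20 / ALIGN_NORM, 1.0), 1.15)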
# ── Champion engine parameters (frozen — do NOT change) ─────────────────────────
META_COLS = {'timestamp', 'scan_number', 'v50_lambda_max_velocity', 'v150_lambda_max_velocity',
'v300_lambda_max_velocity', 'v750_lambda_max_velocity', 'vel_div',
'instability_50', 'instability_150'}
ENGINE_KWARGS = dict(
initial_capital=25000.0, vel_div_threshold=-0.02, vel_div_extreme=-0.05,
min_leverage=0.5, max_leverage=5.0, leverage_convexity=3.0,
fraction=0.20, fixed_tp_pct=0.0099, stop_pct=1.0, max_hold_bars=120,
use_direction_confirm=True, dc_lookback_bars=7, dc_min_magnitude_bps=0.75,
dc_skip_contradicts=True, dc_leverage_boost=1.0, dc_leverage_reduce=0.5,
use_asset_selection=True, min_irp_alignment=0.45,
use_sp_fees=True, use_sp_slippage=True,
sp_maker_entry_rate=0.62, sp_maker_exit_rate=0.50,
use_ob_edge=True, ob_edge_bps=5.0, ob_confirm_rate=0.40,
lookback=100, use_alpha_layers=True, use_dynamic_leverage=True, seed=42,
)
MC_MODELS_DIR = str(Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\nautilus_dolphin\mc_results\models"))
MC_BASE_CFG = {
'trial_id': 0,
'vel_div_threshold': -0.020, 'vel_div_extreme': -0.050,
'use_direction_confirm': True, 'dc_lookback_bars': 7,
'dc_min_magnitude_bps': 0.75, 'dc_skip_contradicts': True,
'dc_leverage_boost': 1.00, 'dc_leverage_reduce': 0.50,
'vd_trend_lookback': 10, 'min_leverage': 0.50, 'max_leverage': 5.00,
'leverage_convexity': 3.00, 'fraction': 0.20,
'use_alpha_layers': True, 'use_dynamic_leverage': True,
'fixed_tp_pct': 0.0099, 'stop_pct': 1.00, 'max_hold_bars': 120,
'use_sp_fees': True, 'use_sp_slippage': True,
'sp_maker_entry_rate': 0.62, 'sp_maker_exit_rate': 0.50,
'use_ob_edge': True, 'ob_edge_bps': 5.00, 'ob_confirm_rate': 0.40,
'ob_imbalance_bias': -0.09, 'ob_depth_scale': 1.00,
'use_asset_selection': True, 'min_irp_alignment': 0.45, 'lookback': 100,
'acb_beta_high': 0.80, 'acb_beta_low': 0.20, 'acb_w750_threshold_pct': 60,
}
OB_ASSETS = ["BTCUSDT", "ETHUSDT", "BNBUSDT", "SOLUSDT"]
# ── MTFBoostACB: ACB wrapper that injects 1m alignment as leverage modulator ─────
class MTFBoostACB:
"""Wraps AdaptiveCircuitBreaker, multiplying per-date boost by 1m alignment factor.
Iron Rule preserved: 1m signal NEVER touches entry gating (vel_div threshold).
Only affects position SIZING via ACB boost channel.
MTF boost formula:
align_frac = fraction of 1m bars where vel_div < VD_1M_THRESHOLD
mtf_mult = 1.0 + MTF_MAX_BOOST * min(align_frac / ALIGN_NORM, 1.0)
info['boost'] *= mtf_mult # rest of ACB info unchanged
"""
def __init__(self, base_acb, klines_align: dict,
max_boost: float = MTF_MAX_BOOST,
align_norm: float = ALIGN_NORM):
self._acb = base_acb
self._klines_align = klines_align # date_str -> align_frac [0, 1]
self._max_boost = max_boost
self._align_norm = align_norm
self.mtf_log = {} # date_str -> (align_frac, mtf_mult, final_boost)
def __getattr__(self, name):
return getattr(self._acb, name)
def get_dynamic_boost_for_date(self, date_str: str, ob_engine=None) -> dict:
info = dict(self._acb.get_dynamic_boost_for_date(date_str, ob_engine=ob_engine))
align = self._klines_align.get(date_str, 0.0)
mtf_mult = (1.0 + self._max_boost * min(align / self._align_norm, 1.0)) if self._align_norm > 0 else 1.0
info['boost'] *= mtf_mult
info['mtf_align'] = align
info['mtf_mult'] = mtf_mult
self.mtf_log[date_str] = (align, mtf_mult, info['boost'])
return info
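# Illustrative self-test of the wrapper (a minimal sketch; _StubACB is a
# hypothetical stand-in exposing only the one method the wrapper overrides,
# not the real AdaptiveCircuitBreaker API):
class _StubACB:
    def get_dynamic_boost_for_date(self, date_str, ob_engine=None):
        return {'boost': 1.20}  # pretend base ACB boost
_demo = MTFBoostACB(_StubACB(), {'2026-01-15': 0.075})
_info = _demo.get_dynamic_boost_for_date('2026-01-15')
# align=0.075 → mtf_mult = 1 + 0.15*0.5 = 1.075; final boost = 1.20 * 1.075
assert math.isclose(_info['mtf_mult'], 1.075)
assert math.isclose(_info['boost'], 1.20 * 1.075)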
# ── Compute engine results helper ───────────────────────────────────────────────
def compute_metrics(dstats, initial_cap=25000.0):
capitals = [s['capital'] for s in dstats]
pnls = [s['pnl'] for s in dstats]
final_cap = capitals[-1] if capitals else initial_cap
roi = (final_cap - initial_cap) / initial_cap * 100
peak = initial_cap; max_dd = 0.0
for c in capitals:
if c > peak: peak = c
dd = (peak - c) / peak * 100
if dd > max_dd: max_dd = dd
pnl_arr = np.array(pnls)
sharpe = float(pnl_arr.mean() / pnl_arr.std() * np.sqrt(252)) if pnl_arr.std() > 0 else 0.0
return roi, max_dd, sharpe, final_cap
def compute_trade_metrics(trade_history):
wins = [t for t in trade_history if t.pnl_absolute > 0]
losses = [t for t in trade_history if t.pnl_absolute <= 0]
gw = sum(t.pnl_absolute for t in wins)
gl = abs(sum(t.pnl_absolute for t in losses))
pf = gw / gl if gl > 0 else float('inf')
wr = len(wins) / len(trade_history) * 100 if trade_history else 0.0
avg_bars = float(np.mean([t.bars_held for t in trade_history])) if trade_history else 0.0
avg_win = float(np.mean([t.pnl_pct for t in wins]) * 100) if wins else 0.0
avg_loss = float(np.mean([t.pnl_pct for t in losses]) * 100) if losses else 0.0
return pf, wr, avg_bars, avg_win, avg_loss, len(trade_history)
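# Quick illustrative check of the two helpers on toy data (values are made up;
# _Trade is a hypothetical stand-in for the engine's trade records):
from collections import namedtuple
_Trade = namedtuple('_Trade', 'pnl_absolute pnl_pct bars_held')
_toy = [_Trade(10.0, 0.004, 30), _Trade(-5.0, -0.002, 60)]
_pf, _wr, *_ = compute_trade_metrics(_toy)
assert math.isclose(_pf, 2.0) and math.isclose(_wr, 50.0)  # PF=10/5, WR=1/2
_roi, _dd, _, _ = compute_metrics([{'capital': 26000.0, 'pnl': 1000.0}])
assert math.isclose(_roi, 4.0)  # (26000 - 25000) / 25000 = +4%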
# ── Load MC-Forewarner ──────────────────────────────────────────────────────────
print("\nLoading MC-Forewarner...")
forewarner = DolphinForewarner(models_dir=MC_MODELS_DIR)
print(" MC-Forewarner ready")
# ── Load 5s champion parquet files ─────────────────────────────────────────────
parquet_5s = sorted(
p for p in VBT_5S_DIR.glob("*.parquet")
if 'catalog' not in str(p) and DATE_START <= p.stem <= DATE_END
)
date_strings = [pf.stem for pf in parquet_5s]
print(f"\n5s champion parquets: {len(parquet_5s)} dates ({date_strings[0]} to {date_strings[-1]})")
# ── Vol p60 calibration (from first 2 5s files, champion standard) ──────────────
all_vols = []
for pf in parquet_5s[:2]:
df = pd.read_parquet(pf)
if 'BTCUSDT' not in df.columns: continue
pr = df['BTCUSDT'].values
for i in range(60, len(pr)):
seg = pr[max(0,i-50):i]
if len(seg)<10: continue
v = float(np.std(np.diff(seg)/seg[:-1]))
if v > 0: all_vols.append(v)
vol_p60 = float(np.percentile(all_vols, 60))
print(f"Vol p60 (5s calibration): {vol_p60:.6f}")
# ── Pre-load 5s parquet data ────────────────────────────────────────────────────
print(f"\nPre-loading {len(parquet_5s)} 5s parquet files...")
t_load = time.time()
pq_data_5s = {}
for pf in parquet_5s:
df = pd.read_parquet(pf)
ac = [c for c in df.columns if c not in META_COLS]
bp = df['BTCUSDT'].values if 'BTCUSDT' in df.columns else None
dv = np.full(len(df), np.nan)
if bp is not None:
for i in range(50, len(bp)):
seg = bp[max(0,i-50):i]
if len(seg)<10: continue
dv[i] = float(np.std(np.diff(seg)/seg[:-1]))
pq_data_5s[pf.stem] = (df, ac, dv)
print(f" Done in {time.time()-t_load:.1f}s")
# ── Load 1m klines and compute per-day alignment fraction ──────────────────────
print(f"\nComputing 1m alignment fractions (VD threshold {VD_1M_THRESHOLD})...")
klines_align = {} # date_str -> align_frac [0, 1]
klines_stats = {} # date_str -> {n_bars, n_bearish, align_frac, vd_median, vd_p5, vd_p95}
missing_1m = []
for ds in date_strings:
pf_1m = VBT_1M_DIR / f"{ds}.parquet"
if not pf_1m.exists():
klines_align[ds] = 0.0
missing_1m.append(ds)
continue
try:
df_1m = pd.read_parquet(pf_1m)
if 'vel_div' not in df_1m.columns:
klines_align[ds] = 0.0
missing_1m.append(ds)
continue
vd = df_1m['vel_div'].dropna().values
n_valid = len(vd)
n_bearish = int(np.sum(vd < VD_1M_THRESHOLD))
align_frac = n_bearish / n_valid if n_valid > 0 else 0.0
klines_align[ds] = align_frac
klines_stats[ds] = {
'n_bars': n_valid,
'n_bearish': n_bearish,
'align_frac': align_frac,
'vd_median': float(np.median(vd)),
'vd_p5': float(np.percentile(vd, 5)),
'vd_p95': float(np.percentile(vd, 95)),
}
except Exception as e:
print(f" WARNING: could not load 1m parquet for {ds}: {e}")
klines_align[ds] = 0.0
missing_1m.append(ds)
if missing_1m:
print(f" WARNING: {len(missing_1m)} dates missing 1m klines data: {missing_1m[:5]}...")
else:
print(f" All {len(date_strings)} dates have 1m klines data")
# Alignment summary
aligns = [klines_align[ds] for ds in date_strings]
mtf_mults = [1.0 + MTF_MAX_BOOST * min(a / ALIGN_NORM, 1.0) for a in aligns]
print(f"\n 1m Alignment (fraction bearish bars):")
print(f" Mean: {np.mean(aligns):.3f} (expected ~0.07 = p7 of distribution)")
print(f" Std: {np.std(aligns):.3f}")
print(f" Min: {np.min(aligns):.3f}")
print(f" Max: {np.max(aligns):.3f}")
print(f" MTF multiplier range: [{min(mtf_mults):.3f}, {max(mtf_mults):.3f}]")
print(f" MTF multiplier mean: {np.mean(mtf_mults):.3f}")
high_align = [(ds, a) for ds, a in klines_align.items() if a >= 0.12]
high_align.sort(key=lambda x: -x[1])
print(f" High-alignment days (>=12%): {len(high_align)}")
for ds, a in high_align[:5]:
mult = 1.0 + MTF_MAX_BOOST * min(a / ALIGN_NORM, 1.0)
print(f" {ds}: align={a:.3f} → mtf_mult={mult:.3f}x")
# ── Build OB engine (real-calibrated MockOBProvider) ──────────────────────────
_mock_ob = MockOBProvider(
imbalance_bias=-0.09, depth_scale=1.0, assets=OB_ASSETS,
imbalance_biases={"BTCUSDT": -0.086, "ETHUSDT": -0.092,
"BNBUSDT": +0.05, "SOLUSDT": +0.05},
)
ob_eng = OBFeatureEngine(_mock_ob)
ob_eng.preload_date("mock", OB_ASSETS)
# ── ACB initialization (shared base, cloned for each run) ─────────────────────
def build_acb(date_strings):
acb = AdaptiveCircuitBreaker()
acb.preload_w750(date_strings)
return acb
# ── Engine runner ──────────────────────────────────────────────────────────────
def run_engine(label, acb, pq_data, date_strings, vol_p60):
print(f"\n{'='*65}")
print(f" RUN: {label}")
print(f"{'='*65}")
t0 = time.time()
engine = NDAlphaEngine(**ENGINE_KWARGS)
engine.set_ob_engine(ob_eng)
engine.set_acb(acb)
engine.set_mc_forewarner(forewarner, MC_BASE_CFG)
engine.set_esoteric_hazard_multiplier(0.0)
dstats = []
for ds in date_strings:
if ds not in pq_data:
continue
df, acols, dvol = pq_data[ds]
vol_ok = np.where(np.isfinite(dvol), dvol > vol_p60, False)
result = engine.process_day(ds, df, acols, vol_regime_ok=vol_ok)
dstats.append({
'date': ds,
'pnl': result.get('pnl', 0.0),
'capital': result.get('capital', ENGINE_KWARGS['initial_capital']),
'trades': result.get('trades', 0),
'boost': result.get('boost', 1.0),
'beta': result.get('beta', 0.0),
'mc_status': result.get('mc_status', 'OK'),
})
elapsed = time.time() - t0
roi, max_dd, sharpe, final_cap = compute_metrics(dstats)
pf, wr, avg_bars, avg_win, avg_loss, n_trades = compute_trade_metrics(engine.trade_history)
mid = len(dstats) // 2
h1 = sum(s['pnl'] for s in dstats[:mid])
h2 = sum(s['pnl'] for s in dstats[mid:])
h2h1 = h2 / h1 if h1 != 0 else float('nan')
print(f" ROI: {roi:+.2f}%")
print(f" PF: {pf:.4f}")
print(f" DD: {max_dd:.2f}%")
print(f" Sharpe: {sharpe:.3f}")
print(f" WR: {wr:.1f}% (N={n_trades})")
print(f" AvgWin: {avg_win:+.3f}% AvgLoss: {avg_loss:+.3f}%")
print(f" AvgBars: {avg_bars:.1f}")
print(f" Capital: ${final_cap:,.2f}")
print(f" H1 P&L: ${h1:+,.2f}")
print(f" H2 P&L: ${h2:+,.2f}")
print(f" H2/H1: {h2h1:.3f}")
print(f" Runtime: {elapsed:.1f}s")
return {
'label': label, 'roi': roi, 'pf': pf, 'dd': max_dd, 'sharpe': sharpe,
'wr': wr, 'n_trades': n_trades, 'avg_bars': avg_bars,
'avg_win': avg_win, 'avg_loss': avg_loss, 'final_capital': final_cap,
'h1_pnl': h1, 'h2_pnl': h2, 'h2h1': h2h1, 'elapsed_s': elapsed,
}, dstats, engine.trade_history
# ── RUN 1: BASELINE (standard ACB, no 1m conditioning) ────────────────────────
print(f"\nInitializing ACB (baseline)...")
acb_base = build_acb(date_strings)
print(f" w750 p60 threshold: {acb_base._w750_threshold:.6f}")
print(f" Dates with w750: {sum(1 for v in acb_base._w750_vel_cache.values() if v != 0.0)}/{len(date_strings)}")
base_result, base_dstats, base_trades = run_engine(
"BASELINE (5s only, no 1m)", acb_base, pq_data_5s, date_strings, vol_p60
)
# ── RUN 2: MTF (ACB boost × 1m alignment factor) ──────────────────────────────
print(f"\nInitializing ACB (MTF — wrapping with 1m alignment)...")
acb_base2 = build_acb(date_strings)
mtf_acb = MTFBoostACB(acb_base2, klines_align, max_boost=MTF_MAX_BOOST, align_norm=ALIGN_NORM)
print(f" MTF_MAX_BOOST = {MTF_MAX_BOOST:.2f} ALIGN_NORM = {ALIGN_NORM:.2f}")
mtf_result, mtf_dstats, mtf_trades = run_engine(
f"MTF (5s × 1m align, max_boost={MTF_MAX_BOOST:.2f})", mtf_acb, pq_data_5s, date_strings, vol_p60
)
# ── DELTA ANALYSIS ─────────────────────────────────────────────────────────────
print(f"\n{''*65}")
print(f" MTF vs BASELINE DELTA")
print(f"{''*65}")
delta_roi = mtf_result['roi'] - base_result['roi']
delta_pf = mtf_result['pf'] - base_result['pf']
delta_dd = mtf_result['dd'] - base_result['dd']
delta_sharpe = mtf_result['sharpe'] - base_result['sharpe']
delta_wr = mtf_result['wr'] - base_result['wr']
delta_n = mtf_result['n_trades'] - base_result['n_trades']
delta_h2h1 = mtf_result['h2h1'] - base_result['h2h1']
print(f" ΔROI: {delta_roi:+.2f}% pp ({base_result['roi']:+.2f}% → {mtf_result['roi']:+.2f}%)")
print(f" ΔPF: {delta_pf:+.4f} ({base_result['pf']:.4f}{mtf_result['pf']:.4f})")
print(f" ΔDD: {delta_dd:+.2f}% pp ({base_result['dd']:.2f}% → {mtf_result['dd']:.2f}%)")
print(f" ΔSharpe: {delta_sharpe:+.3f} ({base_result['sharpe']:.3f}{mtf_result['sharpe']:.3f})")
print(f" ΔWR: {delta_wr:+.1f}% pp ({base_result['wr']:.1f}% → {mtf_result['wr']:.1f}%)")
print(f" ΔTrades: {delta_n:+d} ({base_result['n_trades']}{mtf_result['n_trades']})")
print(f" ΔH2/H1: {delta_h2h1:+.3f} ({base_result['h2h1']:.3f}{mtf_result['h2h1']:.3f})")
efficiency_gain = delta_roi / base_result['roi'] * 100 if base_result['roi'] != 0 else 0
dd_cost = delta_dd / base_result['dd'] * 100 if base_result['dd'] != 0 else 0
print(f"\n Efficiency (ΔROI as % of baseline): {efficiency_gain:+.1f}%")
print(f" DD cost (ΔDD as % of baseline): {dd_cost:+.1f}%")
roi_per_dd = delta_roi / delta_dd if delta_dd != 0 else float('inf')
print(f" ROI/DD ratio (Δ): {roi_per_dd:.2f}")
# MTF boost log summary
print(f"\n 1m Alignment → MTF multiplier summary:")
high_mult_days = [(ds, a, m, b) for ds, (a, m, b) in mtf_acb.mtf_log.items() if m > 1.05]
high_mult_days.sort(key=lambda x: -x[2])
print(f" High MTF multiplier days (>1.05x): {len(high_mult_days)}")
for ds, a, m, b in high_mult_days[:8]:
print(f" {ds}: align={a:.3f} → mtf_mult={m:.3f}x final_boost={b:.3f}x")
# ── Per-day delta table (top winners and losers) ────────────────────────────────
base_by_date = {s['date']: s for s in base_dstats}
mtf_by_date = {s['date']: s for s in mtf_dstats}
day_deltas = []
for ds in date_strings:
if ds in base_by_date and ds in mtf_by_date:
dp = mtf_by_date[ds]['pnl'] - base_by_date[ds]['pnl']
day_deltas.append((ds, dp, mtf_acb.mtf_log.get(ds, (0.0, 1.0, 1.0))))
day_deltas.sort(key=lambda x: -abs(x[1]))
print(f"\n Largest per-day P&L differences (MTF - Baseline):")
for ds, dp, (a, m, b) in day_deltas[:10]:
print(f" {ds}: ΔPNL={dp:+.0f} align={a:.3f} mtf_mult={m:.3f}x")
# ── Sub-period breakdown ────────────────────────────────────────────────────────
def sub_period_roi(dstats, start, end, label, cap_start):
sub = [s for s in dstats if start <= s['date'] <= end]
if not sub: return
roi_s = (sub[-1]['capital'] - cap_start) / cap_start * 100
t_s = sum(s['trades'] for s in sub)
print(f" {label:30s}: ROI={roi_s:+.1f}% T={t_s}")
print(f"\n Sub-period comparison:")
print(f" {'Period':<30} Baseline MTF")
periods = [
('Jan 2026', '2026-01-01', '2026-01-31'),
('Feb 2026', '2026-02-01', '2026-02-25'),
]
for label, s, e in periods:
b_sub = [x for x in base_dstats if s <= x['date'] <= e]
m_sub = [x for x in mtf_dstats if s <= x['date'] <= e]
if b_sub and m_sub:
# Find cap at start of period
all_b = [x for x in base_dstats if x['date'] < s]
b_cap0 = all_b[-1]['capital'] if all_b else ENGINE_KWARGS['initial_capital']
all_m = [x for x in mtf_dstats if x['date'] < s]
m_cap0 = all_m[-1]['capital'] if all_m else ENGINE_KWARGS['initial_capital']
b_roi = (b_sub[-1]['capital'] - b_cap0) / b_cap0 * 100
m_roi = (m_sub[-1]['capital'] - m_cap0) / m_cap0 * 100
delta = m_roi - b_roi
b_t = sum(x['trades'] for x in b_sub)
m_t = sum(x['trades'] for x in m_sub)
print(f" {label:30s}: {b_roi:+5.1f}% (T={b_t}) {m_roi:+5.1f}% (T={m_t}) Δ={delta:+.1f}%")
# ── Statistical note ────────────────────────────────────────────────────────────
print(f"\n Statistical note:")
print(f" Trade count: baseline={base_result['n_trades']} MTF={mtf_result['n_trades']}")
print(f" (Same entries — MTF only changes sizing, not entry decisions.)")
print(f" Iron Rule preserved: 1m signal NEVER gated entries.")
print(f" 1m vel_div cross-corr with 5s: max |r|<0.013 at all lags (orthogonal)")
# ── Save results ───────────────────────────────────────────────────────────────
ts = datetime.now().strftime('%Y%m%d_%H%M%S')
run_dir = Path(__file__).parent / 'run_logs'
run_dir.mkdir(exist_ok=True)
summary = {
'experiment': 'mtf_5s_1m_combined',
'date_range': f'{DATE_START}_to_{DATE_END}',
'mtf_params': {
'vd_1m_threshold': VD_1M_THRESHOLD,
'mtf_max_boost': MTF_MAX_BOOST,
'align_norm': ALIGN_NORM,
},
'baseline': base_result,
'mtf': mtf_result,
'delta': {
'roi_pp': delta_roi,
'pf': delta_pf,
'dd_pp': delta_dd,
'sharpe': delta_sharpe,
'wr_pp': delta_wr,
'n_trades': delta_n,
'h2h1': delta_h2h1,
'efficiency_pct': efficiency_gain,
},
'alignment_stats': {
'n_dates': len(date_strings),
'n_missing_1m': len(missing_1m),
'align_mean': float(np.mean(aligns)),
'align_std': float(np.std(aligns)),
'align_min': float(np.min(aligns)),
'align_max': float(np.max(aligns)),
'mtf_mult_mean': float(np.mean(mtf_mults)),
'mtf_mult_max': float(np.max(mtf_mults)),
'n_high_align': len(high_mult_days),
},
'engine_kwargs': ENGINE_KWARGS,
'run_ts': ts,
}
with open(run_dir / f'mtf_5s_1m_{ts}.json', 'w') as f:
json.dump(summary, f, indent=2, default=str)
# Save alignment CSV
with open(run_dir / f'mtf_alignment_{ts}.csv', 'w', newline='') as f:
w = csv.writer(f)
w.writerow(['date', 'align_frac', 'n_bearish', 'n_bars', 'mtf_mult',
'vd_median', 'vd_p5', 'vd_p95'])
for ds in date_strings:
st = klines_stats.get(ds, {})
a = klines_align.get(ds, 0.0)
m = 1.0 + MTF_MAX_BOOST * min(a / ALIGN_NORM, 1.0) if ALIGN_NORM > 0 else 1.0
w.writerow([
ds, f'{a:.4f}',
st.get('n_bearish', 0), st.get('n_bars', 0),
f'{m:.4f}',
f'{st.get("vd_median", 0):.4f}',
f'{st.get("vd_p5", 0):.4f}',
f'{st.get("vd_p95", 0):.4f}',
])
# Save daily CSVs
for label, dstats in [('baseline', base_dstats), ('mtf', mtf_dstats)]:
with open(run_dir / f'mtf_daily_{label}_{ts}.csv', 'w', newline='') as f:
w = csv.writer(f)
w.writerow(['date', 'pnl', 'capital', 'trades', 'boost', 'beta', 'mc_status'])
for s in dstats:
w.writerow([s['date'], f'{s["pnl"]:.4f}', f'{s["capital"]:.4f}',
s['trades'], f'{s["boost"]:.4f}', f'{s["beta"]:.4f}', s['mc_status']])
print(f"\nResults saved:")
print(f" {run_dir}/mtf_5s_1m_{ts}.json")
print(f" {run_dir}/mtf_daily_baseline_{ts}.csv")
print(f" {run_dir}/mtf_daily_mtf_{ts}.csv")
print(f" {run_dir}/mtf_alignment_{ts}.csv")
print(f"\n{'='*65}")
print(f" MTF EXPERIMENT COMPLETE")
print(f"{'='*65}")
print(f" Hypothesis: 1m orthogonal conditioning improves 5s alpha")
if delta_roi > 0 and delta_dd <= 2.0:
verdict = "CONFIRMED — positive ROI with controlled DD"
elif delta_roi > 0 and delta_dd > 2.0:
verdict = "PARTIAL — positive ROI but elevated DD; needs calibration"
elif delta_roi <= 0:
verdict = "REJECTED — 1m conditioning does not improve 5s ROI"
else:
verdict = "INCONCLUSIVE"
print(f" Verdict: {verdict}")
print(f" ΔROI: {delta_roi:+.2f}% ΔDD: {delta_dd:+.2f}% ΔSharpe: {delta_sharpe:+.3f}")
print(f"{'='*65}")