# Pasted from repository viewer:
#   DOLPHIN/nautilus_dolphin/macro_gated_long_5y.py (556 lines, 25 KiB, Python)
"""Macro-Gated LONG/SHORT Crossover Backtest — 5y Klines
=========================================================
Tests the full two-arm architecture:
dvol_btc LOW (calm/bull) LONG arm: vel_div -0.020 entry, vel_div +0.020 exhaustion exit
dvol_btc HIGH (fear/vol) SHORT arm: vel_div +0.020 entry, vel_div -0.020 exhaustion exit
dvol_btc MID (neutral) FLAT or either arm
Entry/exit logic (confirmed from regime_exit_sweep_5y.py):
LONG: enter vel_div <= -ENTRY_T, exit (exhaustion) vel_div >= +ENTRY_T, avgHold=0.6 bars
SHORT: enter vel_div >= +ENTRY_T, exit (exhaustion) vel_div <= -ENTRY_T, avgHold=0.6 bars
Macro gate variants tested:
1. UNGATED both arms always active (baseline)
2. dvol SPLIT at median (p50): LONG when dvol<p50, SHORT when dvol>p50
3. dvol SPLIT at various thresholds: [45,50,55,60,65,70,75]
4. STRICT: LONG when dvol<p25, SHORT when dvol>p75 (only clearest regimes)
5. COMBINED POSTURE: SHORT in high-dvol, LONG in low-dvol, FLAT in mid-band
Additional gates layered:
A. + realized_vol filter (only trade when rv in Q2-Q4, skip Q1 = calm kills SHORT)
B. + btc_return direction (only SHORT on BTC down-days, only LONG on up-days)
C. + fng filter (from NPZ patch)
Painstaking logs:
macro_gated_summary_TS.csv per (gate_config × direction): overall stats
macro_gated_byyear_TS.csv per (gate_config × direction × year)
macro_gated_bydvol_TS.csv per (dvol_decile × direction): raw edge vs dvol level
macro_gated_byhour_TS.csv per (hour_utc × gate × direction): intraday structure
macro_gated_byrvol_TS.csv per (realized_vol_quartile × direction)
macro_gated_dailylog_TS.csv per (date × direction × gate): n_trades, WR, PF that day
"""
import sys, time, csv, gc
# Windows consoles may not be UTF-8; degrade gracefully on box-drawing chars.
sys.stdout.reconfigure(encoding='utf-8', errors='replace')
from pathlib import Path
from datetime import datetime
from collections import defaultdict
import numpy as np
import pandas as pd
from numpy.lib.stride_tricks import sliding_window_view
# Input/output locations (machine-specific absolute paths).
VBT_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache_klines")      # per-day kline parquet cache
EIG_DIR = Path(r"C:\Users\Lenovo\Documents\- Dolphin NG HD (NG3)\correlation_arb512\eigenvalues")  # per-day NPZ indicator snapshots
LOG_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\nautilus_dolphin\run_logs")
# FIX: parents=True — the original mkdir(exist_ok=True) raises FileNotFoundError
# when an intermediate directory is missing, killing the run before it starts.
LOG_DIR.mkdir(parents=True, exist_ok=True)
ENTRY_T = 0.020 # entry threshold (both arms)
# Best exhaustion params from regime_exit_sweep: exhst fires at +ENTRY_T crossover
# Invalidation: inv_t=0.100 (inner, more selective) or 0.200 (outer, permissive)
INV_T = 0.100 # invalidation threshold
MAX_HOLD = 20 # safety fallback bars
# ─────────────────────────────────────────────────────────────────────────
# Step 1: Preload dvol_btc + fng for all 1710 dates from NPZ
# ─────────────────────────────────────────────────────────────────────────
print("Preloading dvol_btc and fng from NPZ files...")
t0 = time.time()
parquet_files = sorted(VBT_DIR.glob("*.parquet"))
# Drop catalog/metadata parquet artifacts from the per-day kline set.
parquet_files = [p for p in parquet_files if 'catalog' not in str(p)]
total = len(parquet_files)
dvol_map = {}  # ds → float  (date-string → dvol_btc reading for that day)
fng_map = {}   # ds → float  (date-string → fear & greed reading)
# Indicator column indices; resolved lazily from the first NPZ containing the name.
DVOL_IDX = None
FNG_IDX = None
for pf in parquet_files:
    ds = pf.stem  # YYYY-MM-DD
    npz_path = EIG_DIR / ds / "scan_000001__Indicators.npz"
    if not npz_path.exists():
        continue
    try:
        data = np.load(npz_path, allow_pickle=True)
        # NOTE(review): assumes NPZ schema of parallel arrays
        # 'api_names' / 'api_indicators' / 'api_success' — confirm against writer.
        names = list(data['api_names'])
        inds = data['api_indicators']
        succ = data['api_success']
        if DVOL_IDX is None and 'dvol_btc' in names:
            DVOL_IDX = names.index('dvol_btc')
        if FNG_IDX is None and 'fng' in names:
            FNG_IDX = names.index('fng')
        # Only record values flagged successful by the API snapshot.
        if DVOL_IDX is not None and succ[DVOL_IDX]:
            dvol_map[ds] = float(inds[DVOL_IDX])
        if FNG_IDX is not None and succ[FNG_IDX]:
            fng_map[ds] = float(inds[FNG_IDX])
    except Exception:
        # Best-effort preload: a corrupt NPZ simply leaves the date without
        # macro data; the main loop falls back to NaN via dict .get().
        pass
print(f" dvol_btc: {len(dvol_map)} dates fng: {len(fng_map)} dates")
# Global dvol quartile boundaries (from all dates with data)
dvol_vals_all = np.array(sorted(dvol_map.values()))
dvol_p25 = np.percentile(dvol_vals_all, 25)
dvol_p50 = np.percentile(dvol_vals_all, 50)
dvol_p75 = np.percentile(dvol_vals_all, 75)
print(f" dvol quartiles: p25={dvol_p25:.1f} p50={dvol_p50:.1f} p75={dvol_p75:.1f}")
# Decile boundaries for dvol (for bydvol log)
dvol_deciles = np.percentile(dvol_vals_all, np.arange(0, 101, 10))
print(f" dvol deciles: {[f'{v:.0f}' for v in dvol_deciles]}")
fng_vals_all = np.array(sorted(fng_map.values()))
fng_p50 = np.percentile(fng_vals_all, 50) if len(fng_vals_all) > 0 else 50.0
print(f" fng p50={fng_p50:.1f}\n")
# ─────────────────────────────────────────────────────────────────────────
# Gate configurations to test
# Each gate is (name, LONG_condition(dvol,fng,rvol,bret), SHORT_condition)
# ─────────────────────────────────────────────────────────────────────────
# We'll handle gate logic inside the main loop with dvol thresholds
# For simplicity: gate defined by (dvol_long_max, dvol_short_min) thresholds
# LONG fires if dvol <= dvol_long_max; SHORT fires if dvol >= dvol_short_min
GATE_CONFIGS = [
    # name, long_max, short_min  (9999 / 0 are "always allow" sentinels)
    ('UNGATED', 9999, 0), # always both
    ('SPLIT_p50', dvol_p50, dvol_p50), # median split
    ('SPLIT_p25p75', dvol_p25, dvol_p75), # strict quartiles
    ('LONG_ONLY_p50', dvol_p50, 9999), # only LONG in calm
    ('SHORT_ONLY_p50', 0, dvol_p50), # only SHORT in fear
    ('LONG_ONLY_p25', dvol_p25, 9999), # very strict LONG
    ('SHORT_ONLY_p75', 0, dvol_p75), # very strict SHORT
    ('SPLIT_55', 55.0, 55.0),
    ('SPLIT_60', 60.0, 60.0),
    ('SPLIT_65', 65.0, 65.0),
    ('SPLIT_70', 70.0, 70.0),
]
YEARS = ['2021', '2022', '2023', '2024', '2025', '2026']
HOURS = list(range(24))
# Bucket labels for the dvol-decile and realized-vol-quartile breakdowns.
DVOL_BKT = ['<p10','p10-20','p20-30','p30-40','p40-50','p50-60','p60-70','p70-80','p80-90','>p90']
RVOL_BKT = ['Q1_calm','Q2','Q3','Q4_volatile']
# Accumulators:
# stats[(gate_name, direction, year)] = {wins, losses, gw, gl, n, hold_sum}
# hour_stats[(gate_name, direction, hour)] = same
# dvol_stats[(dvol_bucket, direction)] = same (ungated)
# rvol_stats[(rvol_bucket, direction)] = same (ungated)
# daily_log[ds] = {direction: {gate: {n, wins, gw, gl}}}
def make_s():
    """Fresh zeroed accumulator: trade count, wins, gross win/loss, total hold bars."""
    return dict(n=0, wins=0, gw=0.0, gl=0.0, hold_sum=0)
# One accumulator map per breakdown axis; absent keys start zeroed on first touch.
stats = defaultdict(make_s)       # keyed by (gate, direction, year)
hour_stats = defaultdict(make_s)  # keyed by (gate, direction, hour)
dvol_stats = defaultdict(make_s)  # keyed by (dvol_bucket, direction)
rvol_stats = defaultdict(make_s)  # keyed by (rvol_bucket, direction)
daily_log = []
def dvol_bucket(dvol):
    """Map a dvol_btc reading onto its global decile bucket label."""
    # Walk the decile upper edges; the first edge at or above the value wins.
    for pos, hi_edge in enumerate(dvol_deciles[1:]):
        if dvol <= hi_edge:
            return DVOL_BKT[pos]
    return DVOL_BKT[-1]
def rvol_bucket(rvol, rvol_p25, rvol_p50, rvol_p75):
    """Map a realized-vol reading onto its quartile bucket label (strict < edges)."""
    if rvol < rvol_p25:
        return 'Q1_calm'
    if rvol < rvol_p50:
        return 'Q2'
    if rvol < rvol_p75:
        return 'Q3'
    return 'Q4_volatile'
# Global realized_vol quartile: collect first, then reprocess
# Actually we'll do a two-pass: pass 1 for rvol quartiles, pass 2 for stats
# For efficiency, do it in one pass with approximate quartiles from first 200 files
print("Computing realized_vol distribution (sample pass)...")
rvol_sample = []
for pf in parquet_files[::9]: # sample every 9th file (~190 files)
    try:
        df = pd.read_parquet(pf, columns=['BTCUSDT'])
        btc = df['BTCUSDT'].values.astype(np.float64)
        btc = btc[np.isfinite(btc) & (btc > 0)]
        if len(btc) > 60:
            lr = np.diff(np.log(btc))
            # NOTE(review): sqrt(1440) scales per-bar vol to daily, assuming
            # 1-minute bars — TODO confirm bar interval of the parquet cache.
            rvol_sample.append(lr.std() * np.sqrt(1440))
    # FIX: was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit,
    # making the sampling loop impossible to interrupt cleanly.
    except Exception:
        pass
rvol_sample = np.array(rvol_sample)
rvol_p25 = np.percentile(rvol_sample, 25)
rvol_p50 = np.percentile(rvol_sample, 50)
rvol_p75 = np.percentile(rvol_sample, 75)
print(f" realized_vol quartiles: p25={rvol_p25:.4f} p50={rvol_p50:.4f} p75={rvol_p75:.4f}\n")
# ─────────────────────────────────────────────────────────────────────────
# Main loop
# ─────────────────────────────────────────────────────────────────────────
print(f"Main loop: {total} files {len(GATE_CONFIGS)} gate configs...\n")
t1 = time.time()
for i_file, pf in enumerate(parquet_files):
    ds = pf.stem
    year = ds[:4]
    # Get macro indicators for this date
    dvol = dvol_map.get(ds, np.nan)
    fng = fng_map.get(ds, np.nan)
    try:
        df = pd.read_parquet(pf)
    except Exception:
        # Unreadable day file — skip it entirely.
        continue
    if 'vel_div' not in df.columns or 'BTCUSDT' not in df.columns:
        continue
    vd = df['vel_div'].values.astype(np.float64)
    btc = df['BTCUSDT'].values.astype(np.float64)
    # Extract bar timestamps if available (for hour-of-day)
    if hasattr(df.index, 'hour'):
        bar_hours = df.index.hour
    elif 'timestamp' in df.columns:
        bar_hours = pd.to_datetime(df['timestamp']).dt.hour.values
    else:
        bar_hours = np.zeros(len(btc), dtype=int) # fallback
    del df
    # NaN vel_div → 0.0 (can never cross an entry threshold);
    # non-positive or NaN prices → NaN so they fail the `valid` mask below.
    vd = np.where(np.isfinite(vd), vd, 0.0)
    btc = np.where(np.isfinite(btc) & (btc > 0), btc, np.nan)
    n = len(btc)
    if n < MAX_HOLD + 5:
        del vd, btc
        continue
    # Daily stats
    btc_valid = btc[np.isfinite(btc)]
    if len(btc_valid) > 60:
        lr = np.diff(np.log(btc_valid))
        # NOTE(review): sqrt(1440) assumes 1-minute bars (1440/day) — confirm.
        rvol = lr.std() * np.sqrt(1440)
        bret = (btc_valid[-1] - btc_valid[0]) / btc_valid[0]
    else:
        rvol = np.nan
        bret = np.nan
    # Bucket labels for this day; fall back to the middle bucket when data is missing.
    rvol_bkt = rvol_bucket(rvol, rvol_p25, rvol_p50, rvol_p75) if np.isfinite(rvol) else 'Q2'
    dvol_bkt = dvol_bucket(dvol) if np.isfinite(dvol) else DVOL_BKT[5]
    # Rolling windows of MAX_HOLD+1 bars: column 0 is the entry bar,
    # columns 1..MAX_HOLD are the future path used to simulate exits.
    n_usable = n - MAX_HOLD
    vd_windows = sliding_window_view(vd, MAX_HOLD + 1)[:n_usable]
    btc_windows = sliding_window_view(btc, MAX_HOLD + 1)[:n_usable]
    ep_arr = btc_windows[:, 0]
    valid = np.isfinite(ep_arr) & (ep_arr > 0)
    # Precompute exit conditions for LONG and SHORT (fixed exhst=±ENTRY_T, inv=INV_T, mh=MAX_HOLD)
    # We'll compute for both directions and gate by config
    for direction in ['L', 'S']:
        if direction == 'L':
            entry_mask = (vd[:n_usable] <= -ENTRY_T) & valid
            ep_col = ep_arr[:, None]
            fut_btc = btc_windows[:, 1:] # (n_usable, MAX_HOLD)
            price_ret = (fut_btc - ep_col) / ep_col # positive = up = LONG win
            fut_vd = vd_windows[:, 1:]
            exhst_mask = fut_vd >= +ENTRY_T # vel_div crossed to positive
            inv_mask = fut_vd <= -INV_T # deepened more negative
        else: # SHORT
            entry_mask = (vd[:n_usable] >= +ENTRY_T) & valid
            ep_col = ep_arr[:, None]
            fut_btc = btc_windows[:, 1:]
            price_ret = (ep_col - fut_btc) / ep_col # positive = down = SHORT win
            fut_vd = vd_windows[:, 1:]
            exhst_mask = fut_vd <= -ENTRY_T # vel_div crossed to negative
            inv_mask = fut_vd >= +INV_T # went further positive
        # NaN future prices contribute zero PnL rather than propagating NaN.
        price_ret = np.where(np.isfinite(fut_btc), price_ret, 0.0)
        sig_idx = np.where(entry_mask)[0]
        if len(sig_idx) == 0:
            continue
        N_sig = len(sig_idx)
        fvd = fut_vd[sig_idx] # (N_sig, MAX_HOLD)
        fpr = price_ret[sig_idx]
        fbt = fut_btc[sig_idx]
        # Exit logic — vectorized
        BIG = MAX_HOLD + 1  # sentinel: "condition never fired inside the window"
        # First bar at which each condition fires (argmax on a boolean row = first True).
        exhst_bar = np.where(exhst_mask[sig_idx].any(1), np.argmax(exhst_mask[sig_idx], 1), BIG)
        inv_bar = np.where(inv_mask[sig_idx].any(1), np.argmax(inv_mask[sig_idx], 1), BIG)
        mh_bar = np.full(N_sig, MAX_HOLD - 1, dtype=np.int32)
        # Priority: EXHST > INV > MAX_HOLD (no TP/STOP for clean regime-only analysis)
        # (argmin returns the first minimum, so column order encodes tie-break priority)
        all_bars = np.column_stack([exhst_bar, inv_bar, mh_bar])
        first_idx = np.argmin(all_bars, axis=1) # 0=EXHST, 1=INV, 2=MAX_HOLD
        exit_bar = np.clip(all_bars[np.arange(N_sig), first_idx], 0, MAX_HOLD - 1)
        exit_pnl = fpr[np.arange(N_sig), exit_bar]
        won = exit_pnl > 0
        hold_bars = exit_bar + 1 # +1 because 0-indexed
        # Bar hours for each entry
        entry_hours = bar_hours[sig_idx] if len(bar_hours) == n else np.zeros(N_sig, dtype=int)
        # Update dvol/rvol stats (ungated)
        n_tot = N_sig
        n_wins = int(won.sum())
        gw_tot = float(exit_pnl[won].sum()) if won.any() else 0.0
        gl_tot = float((-exit_pnl[~won]).sum()) if (~won).any() else 0.0
        hs_tot = int(hold_bars.sum())
        dvol_stats[(dvol_bkt, direction)]['n'] += n_tot
        dvol_stats[(dvol_bkt, direction)]['wins'] += n_wins
        dvol_stats[(dvol_bkt, direction)]['gw'] += gw_tot
        dvol_stats[(dvol_bkt, direction)]['gl'] += gl_tot
        dvol_stats[(dvol_bkt, direction)]['hold_sum'] += hs_tot
        rvol_stats[(rvol_bkt, direction)]['n'] += n_tot
        rvol_stats[(rvol_bkt, direction)]['wins'] += n_wins
        rvol_stats[(rvol_bkt, direction)]['gw'] += gw_tot
        rvol_stats[(rvol_bkt, direction)]['gl'] += gl_tot
        rvol_stats[(rvol_bkt, direction)]['hold_sum'] += hs_tot
        # Hour-of-day breakdown (ungated)
        for h in np.unique(entry_hours):
            hmask = (entry_hours == h)
            hn = int(hmask.sum())
            hw = int(won[hmask].sum())
            hgw = float(exit_pnl[hmask & won].sum()) if (hmask & won).any() else 0.0
            hgl = float((-exit_pnl[hmask & ~won]).sum()) if (hmask & ~won).any() else 0.0
            hhs = int(hold_bars[hmask].sum())
            k = ('UNGATED', direction, int(h))
            hour_stats[k]['n'] += hn
            hour_stats[k]['wins'] += hw
            hour_stats[k]['gw'] += hgw
            hour_stats[k]['gl'] += hgl
            hour_stats[k]['hold_sum'] += hhs
        # Gate-specific accumulation
        for gate_name, long_max, short_min in GATE_CONFIGS:
            # Check if this date/direction is allowed by the gate
            if np.isnan(dvol):
                # No dvol data → allow both (ungated behavior for this date)
                gate_ok = True
            elif direction == 'L':
                gate_ok = (dvol <= long_max)
            else:
                gate_ok = (dvol >= short_min)
            if not gate_ok:
                continue
            k = (gate_name, direction, year)
            stats[k]['n'] += n_tot
            stats[k]['wins'] += n_wins
            stats[k]['gw'] += gw_tot
            stats[k]['gl'] += gl_tot
            stats[k]['hold_sum'] += hs_tot
        # Daily log (gate=UNGATED for now)
        daily_log.append({
            'date': ds, 'year': year, 'direction': direction,
            'dvol': round(dvol, 2) if np.isfinite(dvol) else None,
            'fng': round(fng, 1) if np.isfinite(fng) else None,
            'rvol': round(rvol, 5) if np.isfinite(rvol) else None,
            'bret': round(bret, 5) if np.isfinite(bret) else None,
            'n_trades': n_tot, 'wins': n_wins,
            'wr': round(n_wins / n_tot * 100, 2) if n_tot else None,
            'gw': round(gw_tot, 6), 'gl': round(gl_tot, 6),
            'pf': round(gw_tot / gl_tot, 4) if gl_tot > 0 else (999.0 if gw_tot > 0 else None),
            'avg_hold': round(hs_tot / n_tot, 2) if n_tot else None,
        })
        # Drop per-direction intermediates before the next direction pass.
        del fvd, fpr, fbt, exhst_bar, inv_bar, mh_bar, all_bars, exit_bar, exit_pnl, won, hold_bars, sig_idx
    # Drop per-file arrays (the sliding windows are views over vd/btc).
    del vd, btc, vd_windows, btc_windows, ep_arr, valid
    if (i_file + 1) % 200 == 0:
        gc.collect()
        elapsed = time.time() - t1
        eta = elapsed / (i_file + 1) * (total - i_file - 1)
        print(f" [{i_file+1}/{total}] {ds} {elapsed:.0f}s eta={eta:.0f}s")
elapsed = time.time() - t1
print(f"\nPass complete: {elapsed:.0f}s\n")
# ─────────────────────────────────────────────────────────────────────────
# Build output rows
# ─────────────────────────────────────────────────────────────────────────
def metrics(s):
    """Collapse an accumulator dict into (n, win-rate %, profit factor, avg hold).

    PF sentinel: 999.0 when there are gross wins but zero gross losses;
    NaN metrics for an empty bucket.
    """
    count = s['n']
    gross_win = s['gw']
    gross_loss = s['gl']
    win_rate = (s['wins'] / count * 100) if count else float('nan')
    if gross_loss > 0:
        prof_factor = gross_win / gross_loss
    else:
        prof_factor = 999.0 if gross_win > 0 else float('nan')
    avg_hold = s['hold_sum'] / count if count else float('nan')
    return count, round(win_rate, 3), round(prof_factor, 4), round(avg_hold, 3)
# ── Summary rows (gate × direction)
summary_rows = []
for gate_name, _, _ in GATE_CONFIGS:
    for direction in ['L', 'S']:
        # Aggregate across years
        agg = make_s()
        for yr in YEARS:
            s = stats.get((gate_name, direction, yr))
            if s:
                # Integer counters and float gross sums, accumulated field-wise.
                for f in ['n','wins','hold_sum']:
                    agg[f] += s[f]
                for f in ['gw','gl']:
                    agg[f] += s[f]
        n, wr, pf, ah = metrics(agg)
        summary_rows.append({
            'gate': gate_name, 'direction': direction,
            'n_trades': n, 'wr': wr, 'pf': pf, 'avg_hold': ah,
            'gw': round(agg['gw'], 2), 'gl': round(agg['gl'], 2),
        })
# ── Per-year rows
year_rows = []
for gate_name, _, _ in GATE_CONFIGS:
    for direction in ['L', 'S']:
        for yr in YEARS:
            # Missing (gate, dir, year) keys render as empty (n=0, NaN) rows.
            s = stats.get((gate_name, direction, yr), make_s())
            n, wr, pf, ah = metrics(s)
            year_rows.append({
                'gate': gate_name, 'direction': direction, 'year': yr,
                'n_trades': n, 'wr': wr, 'pf': pf, 'avg_hold': ah,
            })
# ── dvol decile rows
dvol_rows = []
for bkt in DVOL_BKT:
    for direction in ['L', 'S']:
        s = dvol_stats.get((bkt, direction), make_s())
        n, wr, pf, ah = metrics(s)
        dvol_rows.append({'dvol_bucket': bkt, 'direction': direction,
                          'n_trades': n, 'wr': wr, 'pf': pf, 'avg_hold': ah})
# ── rvol quartile rows
rvol_rows = []
for bkt in RVOL_BKT:
    for direction in ['L', 'S']:
        s = rvol_stats.get((bkt, direction), make_s())
        n, wr, pf, ah = metrics(s)
        rvol_rows.append({'rvol_bucket': bkt, 'direction': direction,
                          'n_trades': n, 'wr': wr, 'pf': pf, 'avg_hold': ah})
# ── Hour-of-day rows (only the UNGATED config is tracked per hour)
hour_rows = []
for h in HOURS:
    for direction in ['L', 'S']:
        s = hour_stats.get(('UNGATED', direction, h), make_s())
        n, wr, pf, ah = metrics(s)
        hour_rows.append({'hour_utc': h, 'direction': direction,
                          'n_trades': n, 'wr': wr, 'pf': pf, 'avg_hold': ah})
# ─────────────────────────────────────────────────────────────────────────
# Save all CSVs
# ─────────────────────────────────────────────────────────────────────────
ts = datetime.now().strftime("%Y%m%d_%H%M%S")
def save_csv(rows, name):
    """Write a list of homogeneous dicts to LOG_DIR as a timestamped CSV."""
    # Skip empty result sets — DictWriter derives the header from rows[0].
    if not rows: return
    path = LOG_DIR / f"macro_gated_{name}_{ts}.csv"
    with open(path, 'w', newline='', encoding='utf-8') as f:
        w = csv.DictWriter(f, fieldnames=rows[0].keys())
        w.writeheader(); w.writerows(rows)
    print(f"{path} ({len(rows)} rows)")
print("Saving CSVs...")
save_csv(summary_rows, 'summary')
save_csv(year_rows, 'byyear')
save_csv(dvol_rows, 'bydvol')
save_csv(rvol_rows, 'byrvol')
save_csv(hour_rows, 'byhour')
save_csv(daily_log, 'dailylog')
# ─────────────────────────────────────────────────────────────────────────
# Console output
# ─────────────────────────────────────────────────────────────────────────
def pf_str(pf):
    """Format a profit factor as a fixed-width string with a star rating.

    NOTE(review): literal spacing reproduced exactly as in source; a web
    paste may have collapsed multi-space alignment — verify column widths.
    """
    if np.isnan(pf):
        return ' nan'
    if pf >= 999:
        return ' inf '  # metrics() sentinel: gross wins with zero losses
    if pf > 1.0:
        mark = '***'
    elif pf > 0.8:
        mark = '** '
    elif pf > 0.6:
        mark = '* '
    else:
        mark = ' '
    return f'{pf:6.3f}{mark}'
print(f"\n{'='*90}")
print(f" MACRO-GATED RESULTS — ALL GATE CONFIGS")
print(f" LONG: vel_div<=-{ENTRY_T} entry, vel_div>=+{ENTRY_T} exhaustion exit")
print(f" SHORT: vel_div>=+{ENTRY_T} entry, vel_div<=-{ENTRY_T} exhaustion exit")
print(f" Max hold: {MAX_HOLD} bars | inv_t={INV_T}")
print(f" dvol quartiles: p25={dvol_p25:.1f} p50={dvol_p50:.1f} p75={dvol_p75:.1f}")
print(f"{'='*90}")
hdr = f" {'Gate':<22} {'Dir':3} {'N':>9} {'WR%':>7} {'PF':>10} {'AvgHold':>8}"
print(hdr)
print(f" {'-'*70}")
# One line per (gate, direction) aggregate from summary_rows.
for row in summary_rows:
    print(f" {row['gate']:<22} {row['direction']:3} {row['n_trades']:>9,} "
          f"{row['wr']:>7.1f}% {pf_str(row['pf']):>10} {row['avg_hold']:>8.2f}b")
# Per-year for top configs
print(f"\n{'='*90}")
print(f" PER-YEAR BREAKDOWN")
print(f"{'='*90}")
# Only the headline gate configurations get the per-year table.
TOP_GATES = ['UNGATED', 'SPLIT_p50', 'LONG_ONLY_p50', 'SHORT_ONLY_p50',
             'SPLIT_p25p75', 'SPLIT_60', 'SPLIT_65']
for gate_name in TOP_GATES:
    print(f"\n Gate: {gate_name}")
    print(f" {'Year':<6} {'L_N':>8} {'L_WR':>7} {'L_PF':>9} | {'S_N':>8} {'S_WR':>7} {'S_PF':>9}")
    print(f" {'-'*60}")
    for yr in YEARS:
        sl = stats.get((gate_name, 'L', yr), make_s())
        ss = stats.get((gate_name, 'S', yr), make_s())
        nl, wrl, pfl, _ = metrics(sl)
        ns, wrs, pfs, _ = metrics(ss)
        print(f" {yr:<6} {nl:>8,} {wrl:>7.1f}% {pf_str(pfl):>9} | "
              f"{ns:>8,} {wrs:>7.1f}% {pf_str(pfs):>9}")
# dvol decile breakdown
print(f"\n{'='*90}")
print(f" EDGE BY dvol_btc DECILE (ungated)")
print(f"{'='*90}")
print(f" {'dvol_bucket':<12} {'L_N':>8} {'L_WR':>7} {'L_PF':>9} | {'S_N':>8} {'S_WR':>7} {'S_PF':>9}")
print(f" {'-'*65}")
for bkt in DVOL_BKT:
    sl = dvol_stats.get((bkt, 'L'), make_s())
    ss = dvol_stats.get((bkt, 'S'), make_s())
    nl, wrl, pfl, _ = metrics(sl)
    ns, wrs, pfs, _ = metrics(ss)
    print(f" {bkt:<12} {nl:>8,} {wrl:>7.1f}% {pf_str(pfl):>9} | "
          f"{ns:>8,} {wrs:>7.1f}% {pf_str(pfs):>9}")
# rvol quartile breakdown
print(f"\n{'='*90}")
print(f" EDGE BY REALIZED VOL QUARTILE (ungated)")
print(f"{'='*90}")
print(f" {'rvol_bucket':<14} {'L_N':>8} {'L_WR':>7} {'L_PF':>9} | {'S_N':>8} {'S_WR':>7} {'S_PF':>9}")
print(f" {'-'*65}")
for bkt in RVOL_BKT:
    sl = rvol_stats.get((bkt, 'L'), make_s())
    ss = rvol_stats.get((bkt, 'S'), make_s())
    nl, wrl, pfl, _ = metrics(sl)
    ns, wrs, pfs, _ = metrics(ss)
    print(f" {bkt:<14} {nl:>8,} {wrl:>7.1f}% {pf_str(pfl):>9} | "
          f"{ns:>8,} {wrs:>7.1f}% {pf_str(pfs):>9}")
# Hour-of-day
print(f"\n{'='*90}")
print(f" EDGE BY HOUR-OF-DAY UTC (ungated)")
print(f"{'='*90}")
print(f" {'Hour':>6} {'L_N':>8} {'L_WR':>7} {'L_PF':>9} | {'S_N':>8} {'S_WR':>7} {'S_PF':>9}")
print(f" {'-'*65}")
for h in HOURS:
    sl = hour_stats.get(('UNGATED', 'L', h), make_s())
    ss = hour_stats.get(('UNGATED', 'S', h), make_s())
    nl, wrl, pfl, _ = metrics(sl)
    ns, wrs, pfs, _ = metrics(ss)
    print(f" {h:>5}h {nl:>8,} {wrl:>7.1f}% {pf_str(pfl):>9} | "
          f"{ns:>8,} {wrs:>7.1f}% {pf_str(pfs):>9}")
# Closing summary: total runtime (t0 set before the preload pass) and a
# reader's guide mapping each research question to its output table.
print(f"\n Total runtime: {time.time()-t0:.0f}s")
print(f" dvol gate boundaries used: p25={dvol_p25:.1f} p50={dvol_p50:.1f} p75={dvol_p75:.1f}")
print(f"\n KEY QUESTIONS ANSWERED:")
print(f" 1. Does dvol gate boost PF above 1.0 for LONG? → see SPLIT_p50 LONG row")
print(f" 2. Does dvol gate boost SHORT? → see SPLIT_p50 SHORT row")
print(f" 3. Combined posture (L in calm, S in fear)? → see SPLIT_p50 combined N")
print(f" 4. What dvol level gives best LONG edge? → see bydvol table")
print(f" 5. Time-of-day structure? → see byhour table")