363 lines
14 KiB
Python
363 lines
14 KiB
Python
|
|
"""MacroPostureSwitcher Backtest — 1m Klines (5 years)
|
||
|
|
======================================================
|
||
|
|
Signal:
|
||
|
|
SHORT posture: vel_div >= +ENTRY_T → SHORT, exit vel_div <= -ENTRY_T (crossover)
|
||
|
|
LONG posture: vel_div <= -ENTRY_T → LONG, exit vel_div >= +ENTRY_T (crossover)
|
||
|
|
NONE posture: skip day entirely
|
||
|
|
|
||
|
|
Posture inputs (no lookahead):
|
||
|
|
dvol_btc, fng, funding_btc — from NPZ, available at day-start
|
||
|
|
realized_vol — PREVIOUS day's intraday vol (lag-1)
|
||
|
|
btc_day_return — PREVIOUS day's BTC return (lag-1)
|
||
|
|
|
||
|
|
Outputs: ROI / WR / PF / Max-DD / Sharpe (all logged to CSV + console)
|
||
|
|
"""
|
||
|
|
import sys, time, csv, gc, json
|
||
|
|
sys.path.insert(0, str(__import__('pathlib').Path(__file__).parent.parent))
|
||
|
|
sys.stdout.reconfigure(encoding='utf-8', errors='replace')
|
||
|
|
|
||
|
|
from pathlib import Path
|
||
|
|
from datetime import datetime
|
||
|
|
from collections import defaultdict
|
||
|
|
import numpy as np
|
||
|
|
import pandas as pd
|
||
|
|
|
||
|
|
from nautilus_dolphin.nautilus.macro_posture_switcher import MacroPostureSwitcher, Posture
|
||
|
|
|
||
|
|
# ── Paths & run parameters ────────────────────────────────────────────────────
# 1m kline parquet cache, one file per day.
VBT_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache_klines")
# Per-day NPZ scan directories holding the external-factor indicators.
EIGEN_PATH = Path(r"C:\Users\Lenovo\Documents\- Dolphin NG HD (NG3)\correlation_arb512\eigenvalues")
# Output directory for CSV/JSON run logs.
LOG_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\nautilus_dolphin\run_logs")

ENTRY_T = 0.020           # symmetric vel_div entry / crossover-exit threshold
MAX_HOLD = 20             # bars safety cap (20 min on 1m)
YEARS = ['2021', '2022', '2023', '2024', '2025', '2026']
|
# ── ExF loader (reuse ACB pattern) ────────────────────────────────────────────
EXF_KEYS = ['dvol_btc', 'fng', 'funding_btc', 'taker']


def load_exf(date_str: str, eigen_path=None) -> dict:
    """Load day-start external factors (ExF) for one date.

    Reads up to the first 5 ``scan_*__Indicators.npz`` files under
    ``<eigen_path>/<date_str>`` and returns, per key in EXF_KEYS, the median
    of every finite value found. A missing directory, no scan files, or a
    corrupt file falls back to neutral defaults so the backtest never aborts
    on bad data.

    Args:
        date_str: Day directory name, e.g. "2021-01-02".
        eigen_path: Base directory containing the per-day scan folders.
            Defaults to the module-level EIGEN_PATH; exposed as a keyword
            so the loader can be pointed elsewhere (and unit-tested).

    Returns:
        dict with float values for dvol_btc / fng / funding_btc / taker.
    """
    defaults = {'dvol_btc': 50.0, 'fng': 50.0, 'funding_btc': 0.0, 'taker': 1.0}
    base = EIGEN_PATH if eigen_path is None else eigen_path
    dp = base / date_str
    if not dp.exists():
        return defaults
    files = sorted(dp.glob('scan_*__Indicators.npz'))[:5]
    if not files:
        return defaults
    buckets = defaultdict(list)
    for f in files:
        try:
            # BUGFIX: np.load on an .npz returns an NpzFile that keeps the
            # archive handle open; use it as a context manager so the file is
            # always closed, even on early `continue` or a bad value.
            with np.load(f, allow_pickle=True) as d:
                if 'api_names' not in d:
                    continue
                names = list(d['api_names'])
                vals = d['api_indicators']
                for k in EXF_KEYS:
                    if k in names:
                        v = float(vals[names.index(k)])
                        if np.isfinite(v):
                            buckets[k].append(v)
        except Exception:
            # Best-effort: one corrupt scan file must not kill the whole day.
            pass
    out = dict(defaults)
    for k, vs in buckets.items():
        if vs:
            out[k] = float(np.median(vs))
    return out
||
|
|
# ── Load all parquet dates ─────────────────────────────────────────────────────
# One daily parquet per trading day; catalog files are not kline data.
parquet_files = [p for p in sorted(VBT_DIR.glob("*.parquet")) if 'catalog' not in str(p)]
total = len(parquet_files)
print(f"Files: {total} | Entry T: ±{ENTRY_T} MaxHold: {MAX_HOLD}b")
||
|
|
# ── Pass 1: compute prev-day realized_vol and btc_return for each date ─────────
|
||
|
|
print("Pass 1: computing lag-1 realized_vol and btc_return...")
|
||
|
|
t0 = time.time()
|
||
|
|
day_rvol = {} # date_str -> rvol
|
||
|
|
day_btcret = {} # date_str -> btc_return
|
||
|
|
|
||
|
|
for pf in parquet_files:
|
||
|
|
ds = pf.stem
|
||
|
|
try:
|
||
|
|
df = pd.read_parquet(pf, columns=['BTCUSDT'])
|
||
|
|
except Exception:
|
||
|
|
continue
|
||
|
|
btc = df['BTCUSDT'].values.astype(np.float64)
|
||
|
|
btc = btc[np.isfinite(btc) & (btc > 0)]
|
||
|
|
if len(btc) < 2:
|
||
|
|
continue
|
||
|
|
log_r = np.diff(np.log(btc))
|
||
|
|
day_rvol[ds] = float(np.std(log_r))
|
||
|
|
day_btcret[ds] = float((btc[-1] - btc[0]) / btc[0])
|
||
|
|
|
||
|
|
dates_sorted = sorted(day_rvol.keys())
|
||
|
|
# Build lag-1 maps
|
||
|
|
prev_rvol = {d: day_rvol.get(dates_sorted[i-1]) if i > 0 else None for i, d in enumerate(dates_sorted)}
|
||
|
|
prev_btcret = {d: day_btcret.get(dates_sorted[i-1]) if i > 0 else None for i, d in enumerate(dates_sorted)}
|
||
|
|
|
||
|
|
print(f" Pass 1 done: {time.time()-t0:.0f}s")
|
||
|
|
|
||
|
|
# ── Pass 2: trade simulation ───────────────────────────────────────────────────
print("Pass 2: posture + crossover trades...")

# Day-level posture decisions come from the macro switcher (LONG side enabled).
switcher = MacroPostureSwitcher(enable_long_posture=True)

trade_log = []  # per-trade records (list of dicts)
day_log = []    # per-day stats rows; later written to the daily CSV

# Compounding equity curve, starting at 1.0x
equity = 1.0
equity_curve = [equity]

# Aggregates keyed by calendar year and by posture name
stats_yr = defaultdict(lambda: {'wins': 0, 'losses': 0, 'gw': 0.0, 'gl': 0.0, 'n': 0, 'paused': 0})
stats_pos = defaultdict(lambda: {'wins': 0, 'losses': 0, 'gw': 0.0, 'gl': 0.0, 'n': 0})
||
|
|
# Main simulation loop: one iteration per trading day.
# For each day: decide posture from day-start ExF + lag-1 stats, then scan the
# 1m bars for vel_div threshold entries with crossover (or MAX_HOLD) exits.
for i, pf in enumerate(parquet_files):
    ds = pf.stem          # date string, e.g. "2021-01-02"
    year = ds[:4]

    # ExF for today (day-start, no lookahead)
    exf = load_exf(ds)
    pr = prev_rvol.get(ds)    # prev day rvol (may be None on day 1)
    pb = prev_btcret.get(ds)  # prev day btc ret

    decision = switcher.decide(
        dvol_btc = exf['dvol_btc'],
        fng = exf['fng'],
        funding_btc = exf['funding_btc'],
        realized_vol = pr,
        btc_day_return = pb,
    )

    # NONE posture: count the paused day, log it, and skip all trading.
    if decision.posture == Posture.NONE:
        stats_yr[year]['paused'] += 1
        day_log.append({'date': ds, 'year': year, 'posture': 'NONE',
                        'fear': round(decision.fear_score, 3),
                        'n_trades': 0, 'day_ret': 0.0, 'equity': round(equity, 6)})
        continue

    # Load klines (full frame this time — we need vel_div too)
    try:
        df = pd.read_parquet(pf)
    except Exception:
        continue
    if 'vel_div' not in df.columns or 'BTCUSDT' not in df.columns:
        continue

    # Sanitize: non-finite vel_div -> 0 (never triggers), bad prices -> NaN.
    vd = df['vel_div'].values.astype(np.float64)
    btc = df['BTCUSDT'].values.astype(np.float64)
    vd = np.where(np.isfinite(vd), vd, 0.0)
    btc = np.where(np.isfinite(btc) & (btc > 0), btc, np.nan)
    n = len(btc)
    del df  # free the frame early; only the two arrays are needed

    # Too few bars to fit even one max-hold trade plus margin — skip day.
    if n < MAX_HOLD + 5:
        del vd, btc
        continue

    pos = decision.posture
    smult = decision.size_mult

    # Set signal and crossover conditions based on posture
    if pos == Posture.SHORT:
        entry_mask = (vd >= ENTRY_T) & np.isfinite(btc)
        cross_back = (vd <= -ENTRY_T)
        sign = -1  # SHORT return = (ep - xp) / ep
    else:  # LONG
        entry_mask = (vd <= -ENTRY_T) & np.isfinite(btc)
        cross_back = (vd >= ENTRY_T)
        sign = +1  # LONG return = (xp - ep) / ep

    # NOTE(review): entries are taken on every qualifying bar, so positions
    # can overlap; day_ret below sums them as if each is independently sized.
    day_trades = []
    for t in range(n - MAX_HOLD):
        if not entry_mask[t]:
            continue
        ep = btc[t]  # entry price
        if not np.isfinite(ep) or ep <= 0:
            continue

        # Find exit: first crossover bar, else the MAX_HOLD safety cap.
        exit_bar = MAX_HOLD
        for k in range(1, MAX_HOLD + 1):
            tb = t + k
            if tb >= n:
                exit_bar = k; break
            if cross_back[tb]:
                exit_bar = k; break

        tb = t + exit_bar
        if tb >= n:
            continue
        xp = btc[tb]  # exit price
        if not np.isfinite(xp) or xp <= 0:
            continue

        raw_ret = sign * (xp - ep) / ep
        sized_ret = raw_ret * smult  # posture size multiplier applied
        day_trades.append({'ret': raw_ret, 'sized': sized_ret, 'hold': exit_bar})

    del vd, btc, entry_mask, cross_back

    # Day stats
    n_t = len(day_trades)
    if n_t == 0:
        day_log.append({'date': ds, 'year': year, 'posture': pos.value,
                        'fear': round(decision.fear_score, 3),
                        'n_trades': 0, 'day_ret': 0.0, 'equity': round(equity, 6)})
        continue

    # Zero-return trades count as wins; PF uses raw (unsized) returns.
    wins = sum(1 for t in day_trades if t['ret'] >= 0)
    losses = n_t - wins
    gw = sum(t['ret'] for t in day_trades if t['ret'] >= 0)
    gl = sum(abs(t['ret']) for t in day_trades if t['ret'] < 0)
    pf_day = gw / gl if gl > 0 else 999.0  # 999 = sentinel for "no losses"
    day_ret = sum(t['sized'] for t in day_trades)

    # Equity compounding: treat sum of sized returns as portfolio return
    # Clip to avoid blowup on extreme days
    day_ret_clamped = max(-0.5, min(day_ret, 2.0))
    equity *= (1 + day_ret_clamped)
    equity_curve.append(equity)

    # Accumulate stats
    s = stats_yr[year]
    s['wins'] += wins; s['losses'] += losses
    s['gw'] += gw; s['gl'] += gl
    s['n'] += n_t

    sp = stats_pos[pos.value]
    sp['wins'] += wins; sp['losses'] += losses
    sp['gw'] += gw; sp['gl'] += gl
    sp['n'] += n_t

    day_log.append({
        'date': ds, 'year': year, 'posture': pos.value,
        'fear': round(decision.fear_score, 3),
        'dvol': round(exf['dvol_btc'], 1), 'fng': round(exf['fng'], 1),
        # NOTE(review): `if pr` treats a legitimate prev_rvol of 0.0 as
        # missing (logs None); `if pr is not None` was probably intended.
        'prev_rvol': round(pr, 7) if pr else None,
        'n_trades': n_t, 'wins': wins, 'losses': losses,
        'pf_day': round(pf_day, 4), 'day_ret': round(day_ret, 6),
        'equity': round(equity, 6),
    })

    # Periodic progress + GC to keep memory flat over thousands of days.
    if (i + 1) % 200 == 0:
        gc.collect()
        print(f" [{i+1}/{total}] {ds} eq={equity:.4f} {time.time()-t0:.0f}s")

elapsed = time.time() - t0
print(f"\nPass 2 done: {elapsed:.0f}s")
||
|
|
# ── Metrics ────────────────────────────────────────────────────────────────────
ec = np.asarray(equity_curve, dtype=np.float64)
roi = (ec[-1] - 1.0) * 100

# Max drawdown: worst peak-to-trough decline of the equity curve
peaks = np.maximum.accumulate(ec)
max_dd = float(np.max((peaks - ec) / peaks)) * 100

# Annualized Sharpe from the returns of active (traded) days only
daily_rets = np.array([row['day_ret'] for row in day_log if row['n_trades'] > 0])
ret_std = np.std(daily_rets) if len(daily_rets) > 1 else 0.0
sharpe = float(np.mean(daily_rets) / ret_std * np.sqrt(252)) if ret_std > 0 else 0.0

# Overall PF / WR aggregated across all years
tot_w = tot_l = 0
tot_gw = tot_gl = 0.0
for s in stats_yr.values():
    tot_w += s['wins']; tot_l += s['losses']
    tot_gw += s['gw']; tot_gl += s['gl']
tot_n = tot_w + tot_l
pf = tot_gw / tot_gl if tot_gl > 0 else 999.0
wr = tot_w / tot_n * 100 if tot_n > 0 else 0.0

paused_days = sum(s['paused'] for s in stats_yr.values())
active_days = sum(1 for row in day_log if row['posture'] != 'NONE')
|
||
|
|
# ── Console summary ────────────────────────────────────────────────────────────
print(f"\n{'='*80}")
print(f" MacroPostureSwitcher — 1m Klines Backtest")
print(f" Entry: ±{ENTRY_T} MaxHold: {MAX_HOLD}b Runtime: {elapsed:.0f}s")
print(f"{'='*80}")
print(f" ROI: {roi:>+8.2f}%")
print(f" Max DD: {max_dd:>8.2f}%")
print(f" Sharpe: {sharpe:>8.3f} (annualized, daily)")
print(f" PF: {pf:>8.4f}")
print(f" WR: {wr:>8.2f}%")
print(f" N trades: {tot_n:>8,}")
print(f" Active days: {active_days} Paused: {paused_days}")
print(f" Equity final: {ec[-1]:.4f}x")

# Yearly table; years with no closed trades print an em-dash row.
print(f"\n Per-year breakdown:")
print(f" {'Year':<6} {'N':>7} {'WR%':>6} {'PF':>7} {'Paused':>7}")
print(f" {'-'*45}")
for yr in YEARS:
    ys = stats_yr[yr]
    closed = ys['wins'] + ys['losses']
    if closed == 0:
        print(f" {yr:<6} {'—':>7} {'—':>6} {'—':>7} {ys['paused']:>7}")
    else:
        win_rate = ys['wins'] / closed * 100
        profit_factor = ys['gw'] / ys['gl'] if ys['gl'] > 0 else 999.0
        print(f" {yr:<6} {closed:>7,} {win_rate:>6.2f}% {profit_factor:>7.4f} {ys['paused']:>7}")

# Posture table; postures with no closed trades are skipped entirely.
print(f"\n Per-posture breakdown:")
print(f" {'Posture':<6} {'N':>8} {'WR%':>6} {'PF':>7}")
print(f" {'-'*35}")
for posture_name, ps in sorted(stats_pos.items()):
    closed = ps['wins'] + ps['losses']
    if closed == 0:
        continue
    win_rate = ps['wins'] / closed * 100
    profit_factor = ps['gw'] / ps['gl'] if ps['gl'] > 0 else 999.0
    print(f" {posture_name:<6} {closed:>8,} {win_rate:>6.2f}% {profit_factor:>7.4f}")
||
|
|
# ── Save logs ──────────────────────────────────────────────────────────────────
# parents=True so a missing run_logs parent directory doesn't abort the save.
LOG_DIR.mkdir(parents=True, exist_ok=True)
ts = datetime.now().strftime("%Y%m%d_%H%M%S")

# Day log CSV.
# BUGFIX: fieldnames were previously taken from day_log[0] alone. A NONE or
# zero-trade first day has fewer keys than a full trading day, so DictWriter
# raised "dict contains fields not in fieldnames" on the first richer row.
# Use the union of all row keys (first-seen order) and blank-fill missing
# cells with restval instead.
day_csv = LOG_DIR / f"posture_1m_daily_{ts}.csv"
if day_log:
    fieldnames = []
    for row in day_log:
        for key in row:
            if key not in fieldnames:
                fieldnames.append(key)
    with open(day_csv, 'w', newline='') as f:
        w = csv.DictWriter(f, fieldnames=fieldnames, restval='')
        w.writeheader()
        w.writerows(day_log)
    print(f"\n → {day_csv}")

# Equity curve (one row per traded day, plus the initial 1.0x point)
eq_csv = LOG_DIR / f"posture_1m_equity_{ts}.csv"
with open(eq_csv, 'w', newline='') as f:
    w = csv.writer(f)
    w.writerow(['idx', 'equity'])
    w.writerows(enumerate(equity_curve))
print(f" → {eq_csv}")

# Summary JSON: headline metrics plus per-year and per-posture breakdowns.
summary = {
    'mode': '1m_klines_posture_backtest',
    'ts': ts, 'runtime_s': round(elapsed, 1),
    'entry_t': ENTRY_T, 'max_hold': MAX_HOLD,
    'roi_pct': round(roi, 4),
    'max_dd_pct': round(max_dd, 4),
    'sharpe': round(sharpe, 4),
    'pf': round(pf, 4),
    'wr_pct': round(wr, 3),
    'n_trades': int(tot_n),
    'active_days': active_days,
    'paused_days': int(paused_days),
    'equity_final': round(float(ec[-1]), 6),
    'per_year': {yr: {
        'n': int(stats_yr[yr]['wins'] + stats_yr[yr]['losses']),
        'wr': round(stats_yr[yr]['wins'] / max(1, stats_yr[yr]['wins'] + stats_yr[yr]['losses']) * 100, 3),
        'pf': round(stats_yr[yr]['gw'] / max(1e-9, stats_yr[yr]['gl']), 4),
        'paused': int(stats_yr[yr]['paused']),
    } for yr in YEARS},
    'per_posture': {pos: {
        'n': int(s['wins'] + s['losses']),
        'wr': round(s['wins'] / max(1, s['wins'] + s['losses']) * 100, 3),
        'pf': round(s['gw'] / max(1e-9, s['gl']), 4),
    } for pos, s in stats_pos.items()},
}
sum_json = LOG_DIR / f"posture_1m_summary_{ts}.json"
with open(sum_json, 'w') as f:
    json.dump(summary, f, indent=2)
print(f" → {sum_json}")
print(f"\n Runtime: {elapsed:.0f}s")