280 lines
12 KiB
Python
280 lines
12 KiB
Python
|
|
"""Leverage risk/reward frontier: sweep max_leverage 5x-25x with ACB v6 active.
|
||
|
|
|
||
|
|
For each level: ROI, PF, Sharpe, DD, max effective leverage, min liquidation
|
||
|
|
distance, worst trade, Monte Carlo ruin, H1/H2 overfitting.
|
||
|
|
"""
|
||
|
|
import sys, time, math
|
||
|
|
from pathlib import Path
|
||
|
|
import numpy as np
|
||
|
|
import pandas as pd
|
||
|
|
|
||
|
|
sys.path.insert(0, str(Path(__file__).parent))
|
||
|
|
|
||
|
|
print("Compiling numba kernels...")
|
||
|
|
t0c = time.time()
|
||
|
|
from nautilus_dolphin.nautilus.alpha_asset_selector import compute_irp_nb, compute_ars_nb, rank_assets_irp_nb
|
||
|
|
from nautilus_dolphin.nautilus.alpha_bet_sizer import compute_sizing_nb
|
||
|
|
from nautilus_dolphin.nautilus.alpha_signal_generator import check_dc_nb
|
||
|
|
_p = np.array([1.0, 2.0, 3.0], dtype=np.float64)
|
||
|
|
compute_irp_nb(_p, -1); compute_ars_nb(1.0, 0.5, 0.01)
|
||
|
|
rank_assets_irp_nb(np.ones((10, 2), dtype=np.float64), 8, -1, 5, 500.0, 20, 0.20)
|
||
|
|
compute_sizing_nb(-0.03, -0.02, -0.05, 3.0, 0.5, 5.0, 0.20, True, True, 0.0,
|
||
|
|
np.zeros(4, dtype=np.int64), np.zeros(4, dtype=np.int64),
|
||
|
|
np.zeros(5, dtype=np.float64), 0, -1, 0.01, 0.04)
|
||
|
|
check_dc_nb(_p, 3, 1, 0.75)
|
||
|
|
print(f" JIT: {time.time() - t0c:.1f}s")
|
||
|
|
|
||
|
|
from nautilus_dolphin.nautilus.alpha_orchestrator import NDAlphaEngine
|
||
|
|
from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker
|
||
|
|
|
||
|
|
VBT_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache")
|
||
|
|
META_COLS = {'timestamp', 'scan_number', 'v50_lambda_max_velocity', 'v150_lambda_max_velocity',
|
||
|
|
'v300_lambda_max_velocity', 'v750_lambda_max_velocity', 'vel_div',
|
||
|
|
'instability_50', 'instability_150'}
|
||
|
|
BASE_ENGINE_KWARGS = dict(
|
||
|
|
initial_capital=25000.0, vel_div_threshold=-0.02, vel_div_extreme=-0.05,
|
||
|
|
min_leverage=0.5, max_leverage=5.0, leverage_convexity=3.0,
|
||
|
|
fraction=0.20, fixed_tp_pct=0.0099, stop_pct=1.0, max_hold_bars=120,
|
||
|
|
use_direction_confirm=True, dc_lookback_bars=7, dc_min_magnitude_bps=0.75,
|
||
|
|
dc_skip_contradicts=True, dc_leverage_boost=1.0, dc_leverage_reduce=0.5,
|
||
|
|
use_asset_selection=True, min_irp_alignment=0.45,
|
||
|
|
use_sp_fees=True, use_sp_slippage=True,
|
||
|
|
sp_maker_entry_rate=0.62, sp_maker_exit_rate=0.50,
|
||
|
|
use_ob_edge=True, ob_edge_bps=5.0, ob_confirm_rate=0.40,
|
||
|
|
lookback=100, use_alpha_layers=True, use_dynamic_leverage=True, seed=42,
|
||
|
|
)
|
||
|
|
VD_THRESH = -0.02; VD_EXTREME = -0.05; CONVEXITY = 3.0
|
||
|
|
BINANCE_MAINT_MARGIN = 0.004 # 0.4% for up to 50x on majors
|
||
|
|
|
||
|
|
# --- Setup ---
parquet_files = sorted(VBT_DIR.glob("*.parquet"))
date_strings = [pf.stem for pf in parquet_files]  # file stems double as date keys

print("Initializing ACB v6 with dynamic beta...")
acb = AdaptiveCircuitBreaker()
acb.preload_w750(date_strings)
# Per-date circuit-breaker info; downstream code reads the 'boost' and 'beta'
# keys of each entry.
acb_info_by_date = {ds: acb.get_dynamic_boost_for_date(ds) for ds in date_strings}
# NOTE(review): reads a private attribute for logging only.
print(f" w750 threshold: {acb._w750_threshold:.6f}")
|
||
|
|
|
||
|
|
# Vol percentile: sample BTCUSDT rolling volatility over the first two days to
# calibrate the 60th-percentile threshold used by the vol-regime gate.
all_vols = []
for day_file in parquet_files[:2]:
    day_df = pd.read_parquet(day_file)
    if 'BTCUSDT' not in day_df.columns:
        continue
    btc = day_df['BTCUSDT'].values
    for end in range(60, len(btc)):
        window = btc[max(0, end - 50):end]
        if len(window) < 10:
            continue
        vol = float(np.std(np.diff(window) / window[:-1]))
        if vol > 0:
            all_vols.append(vol)
vol_p60 = float(np.percentile(all_vols, 60))
|
||
|
|
|
||
|
|
# Pre-load every day's parquet once, keyed by date string:
#   (dataframe, asset price columns, BTCUSDT 50-bar rolling volatility)
pq_data = {}
for day_file in parquet_files:
    day_df = pd.read_parquet(day_file)
    asset_cols = [col for col in day_df.columns if col not in META_COLS]
    btc = day_df['BTCUSDT'].values if 'BTCUSDT' in day_df.columns else None
    rolling_vol = np.full(len(day_df), np.nan)
    if btc is not None:
        for end in range(50, len(btc)):
            window = btc[max(0, end - 50):end]
            if len(window) < 10:
                continue
            rolling_vol[end] = float(np.std(np.diff(window) / window[:-1]))
    pq_data[day_file.stem] = (day_df, asset_cols, rolling_vol)
|
||
|
|
|
||
|
|
def strength_cubic(vel_div, vd_thresh=-0.02, vd_extreme=-0.05, convexity=3.0):
    """Map a velocity-divergence reading to a signal strength in [0, 1].

    Readings at or above ``vd_thresh`` carry no signal (0.0); readings at or
    below ``vd_extreme`` saturate at full strength (1.0). In between, the
    linearly normalized distance below the threshold is raised to
    ``convexity``, so only strongly negative divergences earn meaningful size.

    The parameters are a generalization of the previously hard-coded module
    constants; the defaults equal VD_THRESH, VD_EXTREME and CONVEXITY, so
    existing single-argument callers are unaffected.

    Args:
        vel_div: Velocity-divergence value for the current bar.
        vd_thresh: Activation threshold (no signal at or above this value).
        vd_extreme: Saturation level (full signal at or below this value).
        convexity: Exponent applied to the clamped normalized strength.

    Returns:
        float in [0.0, 1.0].
    """
    if vel_div >= vd_thresh:
        return 0.0
    raw = (vd_thresh - vel_div) / (vd_thresh - vd_extreme)
    return min(1.0, max(0.0, raw)) ** convexity
|
||
|
|
|
||
|
|
|
||
|
|
def run_leverage_test(max_lev):
    """Run full engine at given max_leverage with ACB v6 dynamic beta.

    Replays every cached parquet day through a fresh NDAlphaEngine whose
    max_leverage is overridden to ``max_lev``. Per date, the ACB boost
    (amplified by the cubic vel_div strength when beta > 0) is written to
    engine.regime_size_mult before each bar is processed.

    Returns a 3-tuple:
      * summary dict: ROI, PF, Sharpe, DD, trade count, liquidation/ruin
        stats, H1/H2 split;
      * risk_log: one dict per newly opened position with entry-time risk;
      * dstats: one dict per day with pnl and end-of-day capital.
    """
    kwargs = {**BASE_ENGINE_KWARGS, 'max_leverage': max_lev}
    engine = NDAlphaEngine(**kwargs)
    bar_idx = 0; ph = {}; dstats = []  # ph: per-asset price history lists
    risk_log = []
    max_eff_lev = 0.0; min_liq_dist = float('inf')
    worst_trade_abs = 0.0; worst_trade_pct = 0.0
    min_capital = engine.capital
    exposure_at_min_cap = 0.0  # NOTE(review): tracked but never returned
    capital_series = [engine.capital]

    for pf in parquet_files:
        ds = pf.stem; cs = engine.capital  # cs: capital at start of day
        engine.regime_direction = -1
        engine.regime_dd_halt = False
        info = acb_info_by_date[ds]
        base_boost = info['boost']
        beta = info['beta']

        df, acols, dvol = pq_data[ds]
        bid = 0  # bar index within the current day
        for ri in range(len(df)):
            row = df.iloc[ri]; vd = row.get("vel_div")
            if vd is None or not np.isfinite(vd): bar_idx+=1; bid+=1; continue
            # Collect valid (positive, finite) prices and extend histories.
            prices = {}
            for ac in acols:
                p = row[ac]
                if p and p > 0 and np.isfinite(p):
                    prices[ac] = float(p)
                    if ac not in ph: ph[ac] = []
                    ph[ac].append(float(p))
            if not prices: bar_idx+=1; bid+=1; continue
            # Vol-regime gate: requires 100 warm-up bars within the day, then
            # BTC rolling vol must exceed the precomputed 60th percentile.
            vrok = False if bid < 100 else (np.isfinite(dvol[ri]) and dvol[ri] > vol_p60)

            # ACB v6 dynamic beta: amplify the base boost by signal strength.
            if beta > 0 and base_boost > 1.0:
                ss = strength_cubic(float(vd))
                engine.regime_size_mult = base_boost * (1.0 + beta * ss)
            else:
                engine.regime_size_mult = base_boost

            had_pos = engine.position is not None
            old_trades = len(engine.trade_history)

            engine.process_bar(bar_idx=bar_idx, vel_div=float(vd), prices=prices,
                               vol_regime_ok=vrok, price_histories=ph)

            # New position opened -> log risk
            if engine.position is not None and not had_pos:
                pos = engine.position
                eff_lev = pos.notional / engine.capital if engine.capital > 0 else 999
                # Distance to liquidation: margin ratio minus maintenance, in %.
                liq_dist = (engine.capital / pos.notional - BINANCE_MAINT_MARGIN) * 100 if pos.notional > 0 else 999
                max_eff_lev = max(max_eff_lev, eff_lev)
                min_liq_dist = min(min_liq_dist, liq_dist)
                risk_log.append({
                    'date': ds, 'bar': bar_idx, 'asset': pos.asset,
                    'vel_div': float(vd), 'leverage': pos.leverage,
                    'meta_mult': engine.regime_size_mult, 'eff_lev': eff_lev,
                    'notional': pos.notional, 'capital': engine.capital,
                    'liq_dist': liq_dist,
                })

            # Trade closed -> track worst
            if len(engine.trade_history) > old_trades:
                t = engine.trade_history[-1]
                # pnl as % of pre-trade capital (assumes engine.capital already
                # reflects this trade's pnl — TODO confirm against engine).
                loss_pct = t.pnl_absolute / (engine.capital - t.pnl_absolute) * 100 if (engine.capital - t.pnl_absolute) > 0 else 0
                if t.pnl_absolute < worst_trade_abs:
                    worst_trade_abs = t.pnl_absolute
                    worst_trade_pct = loss_pct

            capital_series.append(engine.capital)
            if engine.capital < min_capital:
                min_capital = engine.capital
                if engine.position:
                    exposure_at_min_cap = engine.position.notional
            bar_idx+=1; bid+=1

        dstats.append({'date': ds, 'pnl': engine.capital - cs, 'cap': engine.capital})

    # Metrics
    tr = engine.trade_history
    w = [t for t in tr if t.pnl_absolute > 0]; l = [t for t in tr if t.pnl_absolute <= 0]
    gw = sum(t.pnl_absolute for t in w) if w else 0  # gross wins
    gl = abs(sum(t.pnl_absolute for t in l)) if l else 0  # gross losses
    dr = [s['pnl']/25000*100 for s in dstats]  # daily returns vs initial capital
    cap_arr = np.array(capital_series)
    peak_arr = np.maximum.accumulate(cap_arr)
    dd_arr = (peak_arr - cap_arr) / peak_arr * 100  # drawdown from running peak, %
    max_dd = float(np.max(dd_arr))

    # Monte Carlo ruin: bootstrap-resample trade pnls; "ruin" = equity ever
    # dropping below 50% of initial capital within a resampled sequence.
    ruin_pct = 0.0
    if tr:
        pnl_dist = np.array([t.pnl_absolute for t in tr])
        ruin_count = 0
        for _ in range(5000):
            cap = 25000.0
            sim = np.random.choice(pnl_dist, size=len(tr), replace=True)
            for pnl in sim:
                cap += pnl
                if cap < 12500: # 50% DD
                    ruin_count += 1; break
        ruin_pct = ruin_count / 5000 * 100

    # Overfitting check: pnl in first half (H1) vs second half (H2) of sample.
    mid = len(parquet_files) // 2
    h1 = sum(s['pnl'] for s in dstats[:mid])
    h2 = sum(s['pnl'] for s in dstats[mid:])

    return {
        'max_lev': max_lev,
        'roi': (engine.capital - 25000) / 25000 * 100,
        'pf': gw / gl if gl > 0 else 999,
        'dd': max_dd,
        'sharpe': np.mean(dr) / np.std(dr) * np.sqrt(365) if np.std(dr) > 0 else 0,
        'trades': len(tr),
        'cap': engine.capital,
        'max_eff_lev': max_eff_lev,
        'min_liq_dist': min_liq_dist,
        'worst_abs': worst_trade_abs,
        'worst_pct': worst_trade_pct,
        'min_capital': min_capital,
        'ruin_50dd': ruin_pct,
        'h1': h1, 'h2': h2,
        'h2_h1': h2/h1 if h1 != 0 else 0,
    }, risk_log, dstats
|
||
|
|
|
||
|
|
|
||
|
|
# --- SWEEP ---
t0 = time.time()
print(f"\n{'='*140}")
print(f"{'MAX_LEV':>8} {'ROI%':>7} {'PF':>6} {'DD%':>6} {'SHARPE':>7} {'TRADES':>7} "
      f"{'MAX_EFF_LEV':>12} {'MIN_LIQ%':>9} {'WORST_TRADE':>12} {'RUIN_50DD%':>11} "
      f"{'H1_PNL':>10} {'H2_PNL':>10} {'H2/H1':>6}")
print(f"{'='*140}")

# Liquidation-distance warning tags, checked from most to least severe.
_LIQ_WARNINGS = ((2, " DANGER"), (5, " !!!"), (10, " !!"), (15, " !"))

all_results = {}
for max_lev in [5, 8, 10, 15, 20, 25]:
    level_start = time.time()
    r, rlog, daily = run_leverage_test(max_lev)
    all_results[max_lev] = (r, rlog, daily)

    danger = ""
    for cutoff, tag in _LIQ_WARNINGS:
        if r['min_liq_dist'] < cutoff:
            danger = tag
            break
    if r['ruin_50dd'] > 30:
        danger += " RUIN"

    print(f" {max_lev:>5}x {r['roi']:>+7.2f} {r['pf']:>6.3f} {r['dd']:>6.2f} {r['sharpe']:>7.2f} {r['trades']:>7} "
          f"{r['max_eff_lev']:>11.2f}x {r['min_liq_dist']:>8.1f}% {r['worst_abs']:>+12.2f} {r['ruin_50dd']:>10.1f}% "
          f"{r['h1']:>+10.2f} {r['h2']:>+10.2f} {r['h2_h1']:>6.2f}{danger} [{time.time()-level_start:.0f}s]")
|
||
|
|
|
||
|
|
# --- TOP 10 RISKIEST ENTRIES per key leverage levels ---
for lev in [10, 15, 25]:
    entries = all_results[lev][1]
    # Smallest liquidation distance first = the closest calls.
    riskiest = sorted(entries, key=lambda entry: entry['liq_dist'])[:10]
    print(f"\n--- TOP 10 RISKIEST ENTRIES (max_lev={lev}x) ---")
    print(f"{'DATE':<12} {'ASSET':<12} {'VEL_DIV':>8} {'LEV':>5} {'META':>6} {'EFF_LEV':>8} "
          f"{'NOTIONAL':>10} {'CAPITAL':>10} {'LIQ_DIST%':>10}")
    for entry in riskiest:
        print(f"{entry['date']:<12} {entry['asset']:<12} {entry['vel_div']:>8.4f} {entry['leverage']:>5.2f} "
              f"{entry['meta_mult']:>6.2f} {entry['eff_lev']:>8.2f}x {entry['notional']:>10.2f} "
              f"{entry['capital']:>10.2f} {entry['liq_dist']:>9.1f}%")
|
||
|
|
|
||
|
|
# --- RISK-ADJUSTED COMPARISON ---
print(f"\n--- RISK-ADJUSTED RANKING (ROI/DD ratio, Sharpe) ---")
for lev in [5, 8, 10, 15, 20, 25]:
    metrics = all_results[lev][0]
    roi_dd = metrics['roi'] / metrics['dd'] if metrics['dd'] > 0 else 0
    # Classify by liquidation distance and Monte Carlo ruin probability.
    if metrics['min_liq_dist'] > 10 and metrics['ruin_50dd'] < 15:
        safe = "SAFE"
    elif metrics['min_liq_dist'] > 5:
        safe = "CAUTION"
    else:
        safe = "DANGER"
    print(f" {lev:>3}x: ROI/DD={roi_dd:.2f} Sharpe={metrics['sharpe']:.2f} "
          f"Ruin={metrics['ruin_50dd']:.1f}% MinLiq={metrics['min_liq_dist']:.1f}% [{safe}]")
|
||
|
|
|
||
|
|
# --- OPTIMAL LEVERAGE IDENTIFICATION ---
# Best = highest Sharpe with ruin < 15% and liq_dist > 5%
safe_levels = {}
for lev, bundle in all_results.items():
    metrics = bundle[0]
    if metrics['ruin_50dd'] < 15 and metrics['min_liq_dist'] > 5:
        safe_levels[lev] = metrics
if safe_levels:
    best_lev, br = max(safe_levels.items(), key=lambda item: item[1]['sharpe'])
    print(f"\n=== OPTIMAL SAFE LEVERAGE: {best_lev}x ===")
    print(f" ROI={br['roi']:+.2f}%, PF={br['pf']:.3f}, Sharpe={br['sharpe']:.2f}, DD={br['dd']:.2f}%")
    print(f" Max eff lev={br['max_eff_lev']:.2f}x, Min liq dist={br['min_liq_dist']:.1f}%")
    print(f" Ruin(50%DD)={br['ruin_50dd']:.1f}%, H2/H1={br['h2_h1']:.2f}")
else:
    print("\n WARNING: No leverage level passes safety criteria (ruin<15%, liq>5%)")

print(f"\nTotal time: {time.time()-t0:.0f}s")
|