# Esoteric Factor (EsoF) hazard-throttle backtest:
# runs a Pristine engine (no EsoF) against a Throttled engine (EsoF-aware)
# over the cached VBT datasets and compares terminal/risk statistics.
import sys
import time
import datetime
import json
from pathlib import Path

import numpy as np
import pandas as pd

# Make the sibling `nautilus_dolphin` package importable when this script
# is run directly from its own directory.
sys.path.insert(0, str(Path(__file__).parent))

from nautilus_dolphin.nautilus.alpha_orchestrator import NDAlphaEngine as PristineEngine
from nautilus_dolphin.nautilus.esf_alpha_orchestrator import NDAlphaEngine as ThrottledEngine
from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker
from nautilus_dolphin.nautilus.ob_features import OBFeatureEngine
from nautilus_dolphin.nautilus.ob_provider import MockOBProvider
# Directory holding the cached VBT parquet datasets (one file per dataset),
# skipping any catalog/index files that live alongside them.
VBT_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict\vbt_cache")

parquet_files = sorted(VBT_DIR.glob("*.parquet"))
parquet_files = [p for p in parquet_files if 'catalog' not in str(p)]

print("1. Loading VBT Cache and Pre-Calculated Esoteric Targets...")

# Adaptive circuit breaker, pre-warmed with the dataset identifiers.
acb = AdaptiveCircuitBreaker()
acb.preload_w750([pf.stem for pf in parquet_files])

# Mock order-book provider with a negative global imbalance bias and
# per-asset overrides for BNB/SOL; feeds the OB feature engine.
mock = MockOBProvider(imbalance_bias=-0.09, depth_scale=1.0,
                      assets=["BTCUSDT", "ETHUSDT", "BNBUSDT", "SOLUSDT"],
                      imbalance_biases={"BNBUSDT": 0.20, "SOLUSDT": 0.20})
ob_engine_inst = OBFeatureEngine(mock)
ob_engine_inst.preload_date("mock", mock.get_assets())
# Shared configuration for both engine variants so the A/B comparison is fair.
ENGINE_KWARGS = {
    # Capital and velocity-divergence entry thresholds.
    "initial_capital": 25000.0,
    "vel_div_threshold": -0.02,
    "vel_div_extreme": -0.05,
    # Leverage envelope.
    "min_leverage": 0.5,
    "max_leverage": 6.0,
    "leverage_convexity": 3.0,
    # Position sizing and exit rules.
    "fraction": 0.20,
    "fixed_tp_pct": 0.0099,
    "stop_pct": 1.0,
    "max_hold_bars": 120,
    # Direction-confirmation layer.
    "use_direction_confirm": True,
    "dc_lookback_bars": 7,
    "dc_min_magnitude_bps": 0.75,
    "dc_skip_contradicts": True,
    "dc_leverage_boost": 1.0,
    "dc_leverage_reduce": 0.5,
    # Asset selection.
    "use_asset_selection": True,
    "min_irp_alignment": 0.45,
    # Execution-cost modeling.
    "use_sp_fees": True,
    "use_sp_slippage": True,
    # Order-book edge filter.
    "use_ob_edge": True,
    "ob_edge_bps": 5.0,
    "ob_confirm_rate": 0.40,
    # Misc.
    "lookback": 100,
    "use_alpha_layers": True,
    "use_dynamic_leverage": True,
    "seed": 42,
}
# Two identically-configured engines: A (pristine, no EsoF throttle)
# and B (throttled, receives the esoteric hazard multiplier each day).
engine_pristine = PristineEngine(**ENGINE_KWARGS)
engine_pristine.set_ob_engine(ob_engine_inst)

engine_throttled = ThrottledEngine(**ENGINE_KWARGS)
engine_throttled.set_ob_engine(ob_engine_inst)

# In-memory dataset cache and the running record of BTC rolling volatility
# observed by the engines.
pq_data = {}
all_vols_engine = []
for pf in parquet_files:
    ds = pf.stem
    df = pd.read_parquet(pf)

    # Asset price columns = everything that is not metadata or a velocity/
    # instability feature column.
    non_asset = {'timestamp', 'scan_number', 'v50_lambda_max_velocity',
                 'v150_lambda_max_velocity', 'v300_lambda_max_velocity',
                 'v750_lambda_max_velocity', 'vel_div',
                 'instability_50', 'instability_150'}
    ac = [c for c in df.columns if c not in non_asset]

    # Velocity-divergence series; zero-filled when the column is absent.
    dv = df['vel_div'].values if 'vel_div' in df.columns else np.zeros(len(df))

    # Load corresponding pre-computed Esoteric factors (optional JSON sidecar).
    eso_path = VBT_DIR / f"ESOTERIC_data_{ds}.json"
    eso = {}
    if eso_path.exists():
        with open(eso_path, 'r') as f:
            eso = json.load(f)

    pq_data[ds] = (df, ac, dv, eso)
def evaluate_eso_hazard(eso_data: dict) -> float:
    """
    Map the pre-computed categorical wipeout vulnerabilities for a day
    to a hazard score in the range 0.0 -> 1.0 (higher = more dangerous).

    Returns 0.0 when no esoteric data is available for the day.
    """
    if not eso_data:
        return 0.0

    dow = eso_data.get('day_of_week', -1)
    illumination = eso_data.get('moon_illumination', 0.5)

    candidate_scores = [0.0]

    # Tuesday (1) carries the highest historical wipeout probability;
    # Monday (0) and Wednesday (2) carry a mid-level one.
    if dow == 1:
        candidate_scores.append(1.0)
    elif dow in (0, 2):
        candidate_scores.append(0.5)

    # Dark-moon proxy (bottom ~10% illumination dramatically increased MaxDD):
    # treated at full severity.
    if illumination <= 0.05:
        candidate_scores.append(1.0)

    return max(candidate_scores)
def compute_mc_cagr(returns, periods=252, n_simulations=1000):
|
||
|
|
np.random.seed(42)
|
||
|
|
daily_returns = np.array(returns)
|
||
|
|
simulated_returns = np.random.choice(daily_returns, size=(n_simulations, periods), replace=True)
|
||
|
|
equity_curves = np.cumprod(1.0 + simulated_returns, axis=1)
|
||
|
|
cagrs = (equity_curves[:, -1] - 1.0) * 100
|
||
|
|
median_cagr = np.median(cagrs)
|
||
|
|
p05_cagr = np.percentile(cagrs, 5)
|
||
|
|
|
||
|
|
max_dds = np.zeros(n_simulations)
|
||
|
|
for i in range(n_simulations):
|
||
|
|
curve = equity_curves[i]
|
||
|
|
peaks = np.maximum.accumulate(curve)
|
||
|
|
drawdowns = (peaks - curve) / peaks
|
||
|
|
max_dds[i] = np.max(drawdowns)
|
||
|
|
|
||
|
|
prob_40dd = np.mean(max_dds >= 0.40) * 100
|
||
|
|
med_max_dd = np.median(max_dds) * 100
|
||
|
|
return median_cagr, p05_cagr, prob_40dd, med_max_dd
|
||
|
|
|
||
|
|
print("2. Firing Parallel Run Trajectories...")

# Global bar counter and per-dataset return streams for the MC bootstrap.
bar_idx = 0
returns_pristine = []
returns_throttled = []

# Empirical peak/trough capital over the whole run (for Max Empirical DD).
peak_cap_pristine = engine_pristine.capital
peak_cap_throttled = engine_throttled.capital
trough_cap_pristine = engine_pristine.capital
trough_cap_throttled = engine_throttled.capital

# Starting capital, used for the Total ROI figures at the end.
start_full_pristine = engine_pristine.capital
start_full_throttled = engine_throttled.capital
for pf in parquet_files:
    ds = pf.stem
    df, acols, dvol_raw, eso = pq_data[ds]

    # Capital snapshots at the start of this dataset (per-dataset returns).
    cap0_pris = engine_pristine.capital
    cap0_throt = engine_throttled.capital

    # Daily ACB parameters for this dataset.
    acb_info = acb.get_dynamic_boost_for_date(ds, ob_engine=ob_engine_inst)
    base_boost = acb_info['boost']
    beta = acb_info['beta']

    # Pre-shift Esoteric evaluation (T-1) predicting the danger of the CURRENT day.
    # The JSON data for 'ds' contains the timestamp for the exact day evaluated,
    # so it mirrors the live service mapping.
    hazard_score = evaluate_eso_hazard(eso)

    # Orchestrator configuration injection for today (throttled engine only).
    engine_throttled.set_esoteric_hazard_multiplier(hazard_score)

    # Independent rolling price histories for each engine.
    hist_pris = {}
    hist_throt = {}

    for i in range(len(df)):
        row = df.iloc[i]
        vel = dvol_raw[i]
        if not np.isfinite(vel):
            bar_idx += 1
            continue

        # Collect valid (positive, finite) prices and grow both histories.
        prices = {}
        for sym in acols:
            px = row[sym]
            if px and px > 0 and np.isfinite(px):
                prices[sym] = float(px)

                # Pristine-side history: cap at 500 entries, trim back to 200.
                hist_pris.setdefault(sym, []).append(float(px))
                if len(hist_pris[sym]) > 500:
                    hist_pris[sym] = hist_pris[sym][-200:]

                # Throttled-side history: same policy.
                hist_throt.setdefault(sym, []).append(float(px))
                if len(hist_throt[sym]) > 500:
                    hist_throt[sym] = hist_throt[sym][-200:]

        if not prices:
            bar_idx += 1
            continue

        # Global BTC rolling volatility: 50-bar std of simple returns; regime
        # flag turns on once we have >100 samples and vol exceeds the rolling
        # 60th percentile.
        btc_hist = hist_pris.get("BTCUSDT", [])
        engine_vrok = False
        if len(btc_hist) >= 50:
            seg = btc_hist[-50:]
            vol_now = float(np.std(np.diff(seg) / np.array(seg[:-1])))
            all_vols_engine.append(vol_now)
            if len(all_vols_engine) > 100:
                engine_vrok = vol_now > np.percentile(all_vols_engine, 60)

        # ACB boost: convex severity ramp between the vel-div threshold
        # (-0.02) and its extreme (-0.05), scaled by beta.
        if beta > 0:
            severity = 0.0
            if vel < -0.02:
                frac = (-0.02 - float(vel)) / (-0.02 - -0.05)
                severity = min(1.0, max(0.0, frac)) ** 3.0
            boost_mult = base_boost * (1.0 + beta * severity)
        else:
            boost_mult = base_boost

        engine_pristine.regime_size_mult = boost_mult
        engine_throttled.regime_size_mult = boost_mult

        engine_pristine.process_bar(bar_idx=bar_idx, vel_div=float(vel), prices=prices,
                                    vol_regime_ok=engine_vrok, price_histories=hist_pris)
        engine_throttled.process_bar(bar_idx=bar_idx, vel_div=float(vel), prices=prices,
                                     vol_regime_ok=engine_vrok, price_histories=hist_throt)

        # Track empirical extremes for the Max Empirical DD figure.
        peak_cap_pristine = max(peak_cap_pristine, engine_pristine.capital)
        peak_cap_throttled = max(peak_cap_throttled, engine_throttled.capital)
        trough_cap_pristine = min(trough_cap_pristine, engine_pristine.capital)
        trough_cap_throttled = min(trough_cap_throttled, engine_throttled.capital)

        bar_idx += 1

    # Per-dataset simple return for the Monte-Carlo bootstrap.
    returns_pristine.append((engine_pristine.capital - cap0_pris) / cap0_pris if cap0_pris > 0 else 0)
    returns_throttled.append((engine_throttled.capital - cap0_throt) / cap0_throt if cap0_throt > 0 else 0)
print("\n==========================================================================================")
|
||
|
|
print(" ESOTERIC FACTOR (EsoF) HAZARD THROTTLE BACKTEST (Pristine vs Throttled)")
|
||
|
|
print("==========================================================================================")
|
||
|
|
|
||
|
|
res_p = engine_pristine.get_performance_summary()
|
||
|
|
res_t = engine_throttled.get_performance_summary()
|
||
|
|
|
||
|
|
mc_p = compute_mc_cagr(returns_pristine)
|
||
|
|
mc_t = compute_mc_cagr(returns_throttled)
|
||
|
|
|
||
|
|
max_dd_p = (peak_cap_pristine - trough_cap_pristine) / peak_cap_pristine
|
||
|
|
max_dd_t = (peak_cap_throttled - trough_cap_throttled) / peak_cap_throttled
|
||
|
|
|
||
|
|
print(f"{'Metric':<25} | {'A: Pristine (No EsoF)':<25} | {'B: Throttled (EsoF 6x->3x)':<25}")
|
||
|
|
print("-" * 80)
|
||
|
|
print(f"{'Terminal Capital':<25} | ${res_p['final_capital']:<24.2f} | ${res_t['final_capital']:<24.2f}")
|
||
|
|
print(f"{'Total ROI':<25} | {(res_p['final_capital']/start_full_pristine - 1.0):<24.2%} | {(res_t['final_capital']/start_full_throttled - 1.0):<24.2%}")
|
||
|
|
print(f"{'Compounding Med CAGR':<25} | {mc_p[0]:>5.1f}%{'':<18} | {mc_t[0]:>5.1f}%")
|
||
|
|
print(f"{'5th Pctile CAGR (Tail)':<25} | {mc_p[1]:>5.1f}%{'':<18} | {mc_t[1]:>5.1f}%")
|
||
|
|
print(f"{'Max Empirical DD':<25} | {max_dd_p:<24.2%} | {max_dd_t:<24.2%}")
|
||
|
|
print(f"{'MC Med Drawdown':<25} | {mc_p[3]:<23.1f}% | {mc_t[3]:<23.1f}%")
|
||
|
|
print(f"{'MC P(>40% DD)':<25} | {mc_p[2]:<23.1f}% | {mc_t[2]:<23.1f}%")
|
||
|
|
|
||
|
|
print("-" * 80)
|
||
|
|
print(f"{'Total Trades':<25} | {res_p['total_trades']:<25} | {res_t['total_trades']:<25}")
|
||
|
|
print(f"{'Win Rate':<25} | {res_p['win_rate']:<24.2%} | {res_t['win_rate']:<24.2%}")
|
||
|
|
print(f"{'Profit Factor':<25} | {res_p['profit_factor']:<24.2f} | {res_t['profit_factor']:<24.2f}")
|
||
|
|
|
||
|
|
print("\nVerdict:")
|
||
|
|
if res_t['profit_factor'] > res_p['profit_factor'] and mc_t[3] < mc_p[3]:
|
||
|
|
print("-> SUCCESS: Esoteric categorical throttling successfully damped structural tail variance and improved overall Profit Factor.")
|
||
|
|
elif mc_t[0] < (mc_p[0] * 0.90): # CAGR drop heavily outweighs hazard safety
|
||
|
|
print("-> FAILED: Esoteric categorical throttling killed too much compound growth. Hazards were overfit or false.")
|
||
|
|
else:
|
||
|
|
print("-> NEUTRAL: Marginal changes. Hazard avoidance perfectly balanced out forfeited growth.")
|