"""
test_dliq_goldstyle.py — Run D_LIQ using EXACTLY the same data setup as
test_pf_dynamic_beta_validate.py (gold test), but with create_d_liq_engine()
instead of NDAlphaEngine.

This is the most faithful reproduction of certification conditions.
"""
import sys, time, math
from pathlib import Path

import numpy as np
import pandas as pd

# Project root; repo packages are imported from nautilus_dolphin/ and its dvae/ subdir.
ROOT = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict")
sys.path.insert(0, str(ROOT / 'nautilus_dolphin'))
sys.path.insert(0, str(ROOT / 'nautilus_dolphin' / 'dvae'))

import exp_shared
from nautilus_dolphin.nautilus.proxy_boost_engine import create_d_liq_engine
from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker
from nautilus_dolphin.nautilus.ob_features import OBFeatureEngine
from nautilus_dolphin.nautilus.ob_provider import MockOBProvider

# NOTE(review): ensure_jit() presumably pre-compiles JIT kernels so later timing
# is not polluted by compilation — confirm in exp_shared.
print("Ensuring JIT...", flush=True)
exp_shared.ensure_jit()
VBT_DIR = exp_shared.VBT_DIR

# Daily parquet files, sorted so the run is chronological; 'catalog' artifacts
# are excluded. File stems double as date strings.
parquet_files = sorted(VBT_DIR.glob("*.parquet"))
parquet_files = [p for p in parquet_files if 'catalog' not in str(p)]
date_strings = [p.stem for p in parquet_files]
print(f"Found {len(parquet_files)} parquet files", flush=True)

# Non-asset columns to skip when extracting asset price series.
META_COLS = exp_shared.META_COLS
# EXACT gold test vol_p60 computation: first 2 files, range(60), seg-based.
# Threshold = 60th percentile of BTC rolling return-volatility, reproduced
# byte-for-byte from the certification script (including the i>=60 offset).
all_vols = []
for pf in parquet_files[:2]:
    df = pd.read_parquet(pf)
    if 'BTCUSDT' not in df.columns:
        continue
    pr = df['BTCUSDT'].values
    for i in range(60, len(pr)):
        # Up-to-50-bar trailing window ending just before bar i.
        seg = pr[max(0,i-50):i]
        if len(seg) < 10:
            continue
        # Std of simple returns over the window.
        v = float(np.std(np.diff(seg)/seg[:-1]))
        if v > 0:
            all_vols.append(v)
    del df  # release the frame before loading the next file
vol_p60 = float(np.percentile(all_vols, 60))
print(f"vol_p60 (gold test method, 2 files, offset 60): {vol_p60:.8f}", flush=True)
# EXACT gold test pq_data loading: all 56 files, ALL assets, offset 50.
# pq_data maps date-stem -> (frame, asset columns, per-bar BTC rolling vol).
print("Loading all 56 parquet files (float64, gold test style)...", flush=True)
pq_data = {}
all_assets = set()
for pf in parquet_files:
    df = pd.read_parquet(pf)
    ac = [c for c in df.columns if c not in META_COLS]
    all_assets.update(ac)
    bp = df['BTCUSDT'].values if 'BTCUSDT' in df.columns else None
    # NaN until the rolling window has enough bars (or when BTC is absent).
    dv = np.full(len(df), np.nan)
    if bp is not None:
        for i in range(50, len(bp)):
            seg = bp[max(0,i-50):i]
            if len(seg) < 10:
                continue
            dv[i] = float(np.std(np.diff(seg)/seg[:-1]))
    pq_data[pf.stem] = (df, ac, dv)
# EXACT gold test OB setup: all assets get mock order-book features, with
# per-asset imbalance overrides for the four majors.
OB_ASSETS = sorted(list(all_assets))
print(f"OB_ASSETS count: {len(OB_ASSETS)}", flush=True)
_mock_ob = MockOBProvider(
    imbalance_bias=-0.09, depth_scale=1.0, assets=OB_ASSETS,
    imbalance_biases={"BTCUSDT": -0.086, "ETHUSDT": -0.092,
                      "BNBUSDT": +0.05, "SOLUSDT": +0.05},
)
ob_eng = OBFeatureEngine(_mock_ob)
ob_eng.preload_date("mock", OB_ASSETS)

print(f"All {len(pq_data)} days loaded. Starting D_LIQ run...", flush=True)
# Create D_LIQ engine (SAME as gold certification): shared kwargs, adaptive
# circuit breaker preloaded with the run's dates, mock OB features attached.
kw = exp_shared.ENGINE_KWARGS.copy()
acb = AdaptiveCircuitBreaker()
acb.preload_w750(date_strings)

eng = create_d_liq_engine(**kw)
eng.set_ob_engine(ob_eng)
eng.set_acb(acb)
eng.set_esoteric_hazard_multiplier(0.0)  # Current code: ceiling=10.0, sets base_max=10.0

print(f"After hazard call: base_max={eng.base_max_leverage} abs_max={eng.abs_max_leverage}", flush=True)

# Per-day equity curve and PnL, used for DD/Sharpe/Calmar afterwards.
daily_caps, daily_pnls = [], []
t0 = time.time()
# Main run: replay every day through the engine in chronological order.
for i, pf in enumerate(parquet_files):
    ds = pf.stem
    df, acols, dvol = pq_data[ds]
    cap_before = eng.capital
    # Volatility regime gate: a bar qualifies only when its rolling vol is
    # finite AND above the p60 threshold; NaN warm-up bars are always False.
    vol_ok = np.where(np.isfinite(dvol), dvol > vol_p60, False)
    eng.process_day(ds, df, acols, vol_regime_ok=vol_ok)
    daily_caps.append(eng.capital)
    daily_pnls.append(eng.capital - cap_before)
    # Progress every 20 days and on the final day.
    if (i+1) % 20 == 0 or i == len(parquet_files)-1:
        print(f" Day {i+1}/{len(parquet_files)}: cap=${eng.capital:,.0f} T={len(eng.trade_history)} ({time.time()-t0:.0f}s)", flush=True)
# ---- Summary statistics vs. the gold-certification targets ----
tr = eng.trade_history
n = len(tr)
roi = (eng.capital - 25000.0) / 25000.0 * 100.0

# Max drawdown (%) over the daily equity curve, from the $25k starting stake.
peak_cap, max_dd = 25000.0, 0.0
for cap in daily_caps:
    peak_cap = max(peak_cap, cap)
    max_dd = max(max_dd, (peak_cap - cap) / peak_cap * 100.0)

# Absolute PnL per trade; falls back to pnl_pct * 250.0 when the trade record
# has no pnl_absolute attribute.
def _abs(t): return t.pnl_absolute if hasattr(t,'pnl_absolute') else t.pnl_pct*250.0
wins = [t for t in tr if _abs(t) > 0]
# Profit factor = gross wins / gross losses (1e-9 floor guards divide-by-zero).
pf_val = sum(_abs(t) for t in wins) / max(abs(sum(_abs(t) for t in tr if _abs(t)<=0)), 1e-9) if n > 0 else 0

# Daily returns in percent of starting capital; annualized Sharpe (365d).
dr = np.array([p/25000.*100. for p in daily_pnls])
sharpe = float(dr.mean()/(dr.std()+1e-9)*math.sqrt(365)) if n > 0 else 0
calmar = roi / max(max_dd, 0.01) if n > 0 else 0

elapsed = time.time() - t0

print(f"\n{'='*65}", flush=True)
print(f"D_LIQ GOLD-STYLE RESULT:", flush=True)
print(f" ROI={roi:+.2f}% T={n} DD={max_dd:.2f}% PF={pf_val:.3f} Calmar={calmar:.2f} ({elapsed:.0f}s)", flush=True)
print(f" liq_stops={eng.liquidation_stops}", flush=True)
print(f"GOLD TARGET: ROI=+181.81% T=2155 DD=17.65%", flush=True)
print(f"T match: {'PASS' if abs(n-2155)<=10 else 'FAIL'} (diff={n-2155:+d})", flush=True)
print(f"ROI match: {'PASS' if abs(roi-181.81)<=2.0 else 'FAIL'} (diff={roi-181.81:+.2f}pp)", flush=True)