# Replication script: re-runs the D_liq gold "perfect maker" backtest
# and certifies the result against the recorded 181.81% ROI run.
import sys

from pathlib import Path

import json

import numpy as np

import time

# Project root holding the nautilus_dolphin package.
# NOTE(review): machine-specific Windows path — this script is tied to that box.
HCM_DIR = Path(r"C:\Users\Lenovo\Documents\- DOLPHIN NG HD HCM TSF Predict")

# Make the project packages importable; must happen BEFORE `import exp_shared`.
sys.path.insert(0, str(HCM_DIR / 'nautilus_dolphin'))

sys.path.insert(0, str(HCM_DIR / 'nautilus_dolphin' / 'dvae'))

# We'll monkeypatch run_backtest to add progress bars
import exp_shared
def _btc_rolling_vol(df):
    """Return the per-row rolling volatility of BTCUSDT for one day's frame.

    dvol[j] is the population std-dev (np.std, ddof=0) of the 1-step returns
    over the 50 rows strictly BEFORE row j (diffs[j-50:j]); the first 50
    entries stay NaN.  diffs[0] is a 0.0 placeholder (no prior price).
    If the frame has no 'BTCUSDT' column, the whole series is NaN.

    The explicit Python loop is kept on purpose (instead of a vectorized
    rewrite) so the floating-point results match the certified gold run
    bit-for-bit.
    """
    dvol = np.full(len(df), np.nan)
    bp = df['BTCUSDT'].values if 'BTCUSDT' in df.columns else None
    if bp is not None:
        diffs = np.zeros(len(bp), dtype=np.float64)
        diffs[1:] = np.diff(bp) / bp[:-1]
        for j in range(50, len(bp)):
            dvol[j] = np.std(diffs[j-50:j])
    return dvol


def replicate():
    """Re-run the D_liq (8x/9x) configuration in PERFECT MAKER mode and
    certify the result against the recorded gold run (181.81% ROI,
    2155 trades, $25,000 starting capital).

    Prints progress on the first/last day and every 10th day so long runs
    don't look hung, then a SUCCESS/FAILED certification line (tolerance:
    0.2 percentage points of ROI).
    """
    print("="*60)
    print("EXACT REPLICATION OF D_LIQ_GOLD PERFECT MAKER (181.81%)")
    print("="*60)
    exp_shared.ensure_jit()

    # Overrides that define "perfect maker" mode: 100% maker fill rate on
    # both entry and exit, SP fees on, SP slippage off, fixed seed.
    perfect_kwargs = {
        'seed': 42,
        'sp_maker_entry_rate': 1.0,
        'sp_maker_exit_rate': 1.0,
        'use_sp_fees': True,
        'use_sp_slippage': False,
    }

    print("Running D_liq (8x/9x) configuration in PERFECT MAKER mode...")
    # Using the monkeypatched version that prints daily progress to avoid 'locked up' appearance
    from nautilus_dolphin.nautilus.proxy_boost_engine import create_d_liq_engine

    # Manually running loop here for transparency
    from exp_shared import load_data, ENGINE_KWARGS, MC_BASE_CFG
    from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker
    import pandas as pd
    import gc

    d = load_data()
    kw = ENGINE_KWARGS.copy()
    kw.update(perfect_kwargs)

    acb = AdaptiveCircuitBreaker()
    acb.preload_w750(d['date_strings'])

    eng = create_d_liq_engine(**kw)
    eng.set_ob_engine(d['ob_eng'])
    eng.set_acb(acb)

    daily_caps, daily_pnls = [], []
    total_days = len(d['parquet_files'])

    t_start = time.time()
    for i, pf in enumerate(d['parquet_files']):
        ds = pf.stem
        # Progress on the first day, the last day, and every 10th day.
        # (Fixed: the original comment claimed "every 5 days" but the code
        # checks (i+1) % 10.)
        if i == 0 or i == total_days-1 or (i+1) % 10 == 0:
            elapsed = time.time() - t_start
            print(f" Day {i+1}/{total_days}: {ds} | Cap: ${eng.capital:,.2f} | Trades: {len(eng.trade_history)} | Elapsed: {elapsed:.0f}s")

        df = pd.read_parquet(pf)
        # Asset columns = everything that isn't metadata.
        acols = [c for c in df.columns if c not in exp_shared.META_COLS]

        # Rolling BTC volatility feeds the vol-regime gate below.
        dvol = _btc_rolling_vol(df)

        cap_before = eng.capital
        # Trade only where rolling vol is finite AND above the 60th percentile.
        vol_ok = np.where(np.isfinite(dvol), dvol > d['vol_p60'], False)
        eng.process_day(ds, df, acols, vol_regime_ok=vol_ok)
        daily_caps.append(eng.capital)
        daily_pnls.append(eng.capital - cap_before)

        # Release the day's frame promptly to keep peak memory down.
        del df
        gc.collect()

    tr = eng.trade_history
    n = len(tr)
    # ROI relative to the gold run's fixed $25,000 starting capital.
    roi = (eng.capital - 25000.0) / 25000.0 * 100.0

    print("\nREPLICATION RESULTS:")
    print(f" ROI: {roi:.2f}% (Target: 181.81%)")
    print(f" Trades: {n} (Target: 2155)")
    print(f" Final Cap: ${eng.capital:,.2f}")

    if abs(roi - 181.81) < 0.2:
        print("\nGOLD CERTIFICATION: SUCCESS ✓")
    else:
        print(f"\nGOLD CERTIFICATION: FAILED ✗ (Diff: {roi-181.81:+.2f}pp)")
# Script entry point: run the full replication when executed directly.
if __name__ == "__main__":
    replicate()