initial: import DOLPHIN baseline 2026-04-21 from dolphinng5_predict working tree

Includes core prod + GREEN/BLUE subsystems:
- prod/ (BLUE harness, configs, scripts, docs)
- nautilus_dolphin/ (GREEN Nautilus-native impl + dvae/ preserved)
- adaptive_exit/ (AEM engine + models/bucket_assignments.pkl)
- Observability/ (EsoF advisor, TUI, dashboards)
- external_factors/ (EsoF producer)
- mc_forewarning_qlabs_fork/ (MC regime/envelope)

Excludes runtime caches, logs, backups, and reproducible artifacts per .gitignore.
This commit is contained in:
hjnormey
2026-04-21 16:58:38 +02:00
commit 01c19662cb
643 changed files with 260241 additions and 0 deletions

View File

@@ -0,0 +1 @@
"""Test suite for DOLPHIN NG HD Nautilus system."""

View File

@@ -0,0 +1,7 @@
import sys
from pathlib import Path

# Ensure the project root is on sys.path so the `nautilus_dolphin`
# package resolves no matter which directory pytest is launched from.
_ROOT = Path(__file__).resolve().parents[1]
_root_str = str(_ROOT)
if _root_str not in sys.path:
    sys.path.insert(0, _root_str)

View File

@@ -0,0 +1,16 @@
from pathlib import Path
import sys

# Diagnostic script: print the resolved directories, force the
# nautilus_dolphin checkout onto sys.path, then report whether
# DolphinActor can actually be imported from there.
print(f"File: {__file__}")
hcm_dir = Path(__file__).parent.parent.parent
print(f"HCM_DIR: {hcm_dir}")
nd_dir = hcm_dir / "nautilus_dolphin"
print(f"ND_DIR: {nd_dir}")
sys.path.insert(0, str(nd_dir))
print(f"Path: {sys.path[:1]}")
try:
    from nautilus_dolphin.nautilus.dolphin_actor import DolphinActor
except ImportError as e:
    print(f"Import: ERROR ({e})")
else:
    print("Import: SUCCESS")

View File

@@ -0,0 +1,257 @@
"""
Test 0: Nautilus Bootstrap - Sine Qua Non
=========================================
This test MUST pass before any other Nautilus-Dolphin tests.
It verifies that:
1. Nautilus Trader is installed and functional
2. Basic Nautilus components can be imported
3. All N-Dolphin Nautilus-dependent components can be imported
This is the foundation test - if this fails, nothing else will work.
"""
import pytest
import sys
def test_nautilus_trader_installed():
    """
    Test 0.0: Verify Nautilus Trader is installed.
    This is the most basic check - can we import Nautilus?
    """
    try:
        import nautilus_trader
        from nautilus_trader.common.component import LiveClock, Logger
        from nautilus_trader.common import Environment
        from nautilus_trader.system.kernel import NautilusKernel
        from nautilus_trader.system.config import NautilusKernelConfig
        from nautilus_trader.model.identifiers import TraderId
        from nautilus_trader.core.uuid import UUID4
    except ImportError as e:
        pytest.fail(f"Nautilus Trader not installed: {e}. Run: pip install nautilus_trader")
    else:
        # Only report success once every import above resolved.
        print("[Test 0.0] Nautilus Trader imports: SUCCESS")
        print(f"[Test 0.0] Nautilus Trader version: {nautilus_trader.__version__}")
def test_nautilus_basic_config():
    """
    Test 0.1: Verify NautilusKernel config can be created.
    """
    from nautilus_trader.common import Environment
    from nautilus_trader.model.identifiers import TraderId
    from nautilus_trader.system.config import NautilusKernelConfig

    # Build a minimal backtest-environment kernel config.
    kernel_config = NautilusKernelConfig(
        environment=Environment.BACKTEST,
        trader_id=TraderId("DOLPHIN-001"),
    )
    assert kernel_config is not None
    assert kernel_config.environment == Environment.BACKTEST
    print("[Test 0.1] NautilusKernelConfig creation: SUCCESS")
def test_nautilus_dolphin_core_imports():
    """
    Test 0.2: Import all N-Dolphin core components.
    These components don't depend on Nautilus.
    """
    from nautilus_dolphin.nautilus.circuit_breaker import CircuitBreakerManager
    from nautilus_dolphin.nautilus.metrics_monitor import MetricsMonitor
    from nautilus_dolphin.nautilus.adaptive_circuit_breaker import (
        AdaptiveCircuitBreaker, ACBConfig, ACBPositionSizer
    )

    # Smoke-test construction of the Nautilus-free components.
    breaker = AdaptiveCircuitBreaker()
    position_sizer = ACBPositionSizer()
    assert breaker is not None
    assert position_sizer is not None
    print("[Test 0.2] N-Dolphin core component imports: SUCCESS")
def test_nautilus_dolphin_strategy_import():
    """
    Test 0.3: Import DolphinExecutionStrategy class.
    Note: Instantiation requires Nautilus StrategyConfig.
    """
    from nautilus_dolphin.nautilus.strategy import DolphinExecutionStrategy

    # Import plus a truthy class object is the whole test; constructing
    # the strategy would need a live Nautilus StrategyConfig.
    assert DolphinExecutionStrategy is not None
    print("[Test 0.3] DolphinExecutionStrategy import: SUCCESS")
def test_nautilus_dolphin_signal_bridge_import():
    """
    Test 0.4: Import SignalBridgeActor class.
    """
    from nautilus_dolphin.nautilus.signal_bridge import SignalBridgeActor

    # Import-only check: the actor class must resolve and be truthy.
    assert SignalBridgeActor is not None
    print("[Test 0.4] SignalBridgeActor import: SUCCESS")
def test_nautilus_dolphin_exec_algorithm_import():
    """
    Test 0.5: Import SmartExecAlgorithm class.
    """
    from nautilus_dolphin.nautilus.smart_exec_algorithm import SmartExecAlgorithm

    # Import-only check: the execution algorithm class must resolve.
    assert SmartExecAlgorithm is not None
    print("[Test 0.5] SmartExecAlgorithm import: SUCCESS")
def test_nautilus_dolphin_other_imports():
    """
    Test 0.6: Import remaining Nautilus-dependent components.
    """
    from nautilus_dolphin.nautilus.position_manager import PositionManager
    from nautilus_dolphin.nautilus.volatility_detector import VolatilityRegimeDetector
    from nautilus_dolphin.nautilus.data_adapter import JSONEigenvalueDataAdapter, BacktestDataLoader

    # Reaching this line means every import above resolved.
    print("[Test 0.6] Other Nautilus component imports: SUCCESS")
def test_acb_functionality():
    """
    Test 0.7: Verify ACB works independently.
    """
    from nautilus_dolphin.nautilus.adaptive_circuit_breaker import (
        AdaptiveCircuitBreaker, ACBPositionSizer
    )

    breaker = AdaptiveCircuitBreaker()
    position_sizer = ACBPositionSizer()

    # A cut for any date must leave a non-negative size and land on one
    # of the discrete ACB cut rates.
    final_size, acb_info = breaker.apply_cut_to_position_size(
        base_size=1000.0,
        date_str="2025-01-01"
    )
    assert final_size >= 0
    assert acb_info['cut'] in [0.0, 0.15, 0.45, 0.55, 0.75, 0.8]

    # Sizing through the ACB-aware sizer may only ever shrink the base size.
    sized_size, sizing_info = position_sizer.calculate_size(
        base_size=1000.0,
        date_str="2025-01-01"
    )
    assert 0 <= sized_size <= 1000.0
    print("[Test 0.7] ACB functionality: SUCCESS")
def test_circuit_breaker_functionality():
    """
    Test 0.8: Verify CircuitBreaker works independently.
    """
    from nautilus_dolphin.nautilus.circuit_breaker import CircuitBreakerManager

    manager = CircuitBreakerManager()

    # A freshly constructed breaker must start untripped.
    assert not manager.is_tripped()

    # can_open_position returns tuple[bool, str]: (decision, reason).
    can_open, reason = manager.can_open_position(
        asset="BTCUSDT",
        current_balance=10000.0
    )
    assert isinstance(can_open, bool)
    assert isinstance(reason, str)

    # The status snapshot must expose the trip flag and open positions.
    status = manager.get_status()
    for key in ('is_tripped', 'active_positions'):
        assert key in status
    print("[Test 0.8] CircuitBreaker functionality: SUCCESS")
def test_launcher_import():
    """
    Test 0.9: Verify launcher is importable.
    """
    from nautilus_dolphin.nautilus.launcher import NautilusDolphinLauncher

    # Import-only check: the launcher class must resolve.
    assert NautilusDolphinLauncher is not None
    print("[Test 0.9] Launcher import: SUCCESS")
def test_full_import_chain():
    """
    Test 0.10: Full import chain - all components from top-level.
    """
    from nautilus_dolphin import (
        # Core
        CircuitBreakerManager,
        MetricsMonitor,
        AdaptiveCircuitBreaker,
        ACBConfig,
        ACBPositionSizer,
        # Nautilus-dependent classes
        SignalBridgeActor,
        DolphinExecutionStrategy,
        SmartExecAlgorithm,
        PositionManager,
        VolatilityRegimeDetector,
        # Launcher
        NautilusDolphinLauncher,
    )
    # If the package __init__ re-exports everything, the whole system boots.
    banner = "=" * 80
    print("[Test 0.10] Full import chain: SUCCESS")
    print("\n" + banner)
    print("ALL TESTS PASSED - NAUTILUS-DOLPHIN SYSTEM IS BOOTABLE")
    print(banner)
if __name__ == '__main__':
    banner = "=" * 80
    print(banner)
    print("NAUTILUS-DOLPHIN BOOTSTRAP TEST (Test 0)")
    print(banner)
    print()
    # Ordered (number, callable, description) triples - run sequentially so
    # the earliest failure pinpoints the first broken layer of the stack.
    tests = [
        ("0.0", test_nautilus_trader_installed, "Nautilus Trader installed"),
        ("0.1", test_nautilus_basic_config, "Nautilus kernel config"),
        ("0.2", test_nautilus_dolphin_core_imports, "Core component imports"),
        ("0.3", test_nautilus_dolphin_strategy_import, "Strategy import"),
        ("0.4", test_nautilus_dolphin_signal_bridge_import, "SignalBridge import"),
        ("0.5", test_nautilus_dolphin_exec_algorithm_import, "ExecAlgorithm import"),
        ("0.6", test_nautilus_dolphin_other_imports, "Other component imports"),
        ("0.7", test_acb_functionality, "ACB functionality"),
        ("0.8", test_circuit_breaker_functionality, "CircuitBreaker functionality"),
        ("0.9", test_launcher_import, "Launcher import"),
        ("0.10", test_full_import_chain, "Full import chain"),
    ]
    tally = {'passed': 0, 'failed': 0}
    for num, test_func, desc in tests:
        try:
            test_func()
        except Exception as e:
            print(f"[FAIL] Test {num}: {desc}")
            print(f" Error: {e}")
            tally['failed'] += 1
        else:
            print(f"[PASS] Test {num}: {desc}")
            tally['passed'] += 1
    print()
    print(banner)
    print(f"RESULTS: {tally['passed']} passed, {tally['failed']} failed")
    print(banner)
    # Non-zero exit so CI treats any bootstrap failure as fatal.
    if tally['failed'] > 0:
        sys.exit(1)

View File

@@ -0,0 +1,602 @@
"""
ACB Nautilus vs Reference Implementation - Identity Test
=========================================================
Verifies that the Nautilus-Dolphin ACB implementation produces
IDENTICAL results to the tested reference implementation.
This test runs WITHOUT requiring Nautilus Trader to be installed,
using only the core ACB logic.
"""
import unittest
import sys
from pathlib import Path
from datetime import datetime
from dataclasses import dataclass
from typing import Dict, Tuple
# Add parent to path
sys.path.insert(0, str(Path(__file__).parent.parent))
# ============================================================================
# REFERENCE IMPLEMENTATION (Copied from tested ACB)
# ============================================================================
@dataclass
class ReferenceACBConfig:
    """Reference ACB v5 Configuration - THE GROUND TRUTH."""

    # Position-size cut applied per integer signal bucket; fractional
    # signal counts are floored into these buckets by the cut mapping.
    CUT_RATES = {
        0: 0.00,  # no stress - full size
        1: 0.15,
        2: 0.45,
        3: 0.55,
        4: 0.75,
        5: 0.80,  # maximum defensive cut
    }

    # Funding-rate thresholds (BTC).
    FUNDING_VERY_BEARISH = -0.0001
    FUNDING_BEARISH = 0.0
    # DVOL (implied-volatility index) thresholds.
    DVOL_EXTREME = 80
    DVOL_ELEVATED = 55
    # Fear & Greed index thresholds.
    FNG_EXTREME_FEAR = 25
    FNG_FEAR = 40
    # Taker buy/sell ratio thresholds.
    TAKER_SELLING = 0.8
    TAKER_MILD_SELLING = 0.9
class ReferenceAdaptiveCircuitBreaker:
    """
    Reference implementation - THE GROUND TRUTH.
    This is the tested, validated implementation.
    """

    def __init__(self):
        self.config = ReferenceACBConfig()

    def calculate_signals(self, factors: Dict) -> Dict:
        """Calculate signals - REFERENCE implementation."""
        cfg = self.config
        total = 0.0
        severity = 0

        # Funding: a full signal when very bearish, half when mildly bearish.
        funding_rate = factors.get('funding_btc', 0)
        if funding_rate < cfg.FUNDING_VERY_BEARISH:
            total += 1.0
            severity += 2
        elif funding_rate < cfg.FUNDING_BEARISH:
            total += 0.5
            severity += 1

        # DVOL: extreme volatility is a full signal, elevated half.
        implied_vol = factors.get('dvol_btc', 50)
        if implied_vol > cfg.DVOL_EXTREME:
            total += 1.0
            severity += 2
        elif implied_vol > cfg.DVOL_ELEVATED:
            total += 0.5
            severity += 1

        # FNG only counts when confirmed by signals already accumulated.
        fear_greed = factors.get('fng', 50)
        if fear_greed < cfg.FNG_EXTREME_FEAR:
            if total >= 1:
                total += 1.0
                severity += 2
        elif fear_greed < cfg.FNG_FEAR:
            if total >= 0.5:
                total += 0.5
                severity += 1

        # Taker flow: note mild selling adds weight but no severity.
        taker_ratio = factors.get('taker', 1.0)
        if taker_ratio < cfg.TAKER_SELLING:
            total += 1.0
            severity += 1
        elif taker_ratio < cfg.TAKER_MILD_SELLING:
            total += 0.5

        return {'signals': total, 'severity': severity}

    def get_cut_from_signals(self, signals: float) -> float:
        """Map signals to cut - REFERENCE implementation."""
        # Walk the buckets top-down; first floor the count clears wins.
        for floor in (5, 4, 3, 2, 1):
            if signals >= float(floor):
                return self.config.CUT_RATES[floor]
        return self.config.CUT_RATES[0]

    def get_cut_for_factors(self, factors: Dict) -> Dict:
        """Get complete cut info - REFERENCE implementation."""
        signal_info = self.calculate_signals(factors)
        return {
            'cut': self.get_cut_from_signals(signal_info['signals']),
            'signals': signal_info['signals'],
            'severity': signal_info['severity'],
            'factors': factors,
        }
# ============================================================================
# NAUTILUS IMPLEMENTATION (Import the actual Nautilus ACB)
# ============================================================================
# Try to import Nautilus ACB.
# NOTE: the module lives at `nautilus_dolphin.nautilus.adaptive_circuit_breaker`
# (the same path the bootstrap tests and the standalone loader use); the
# previous doubled `nautilus_dolphin.nautilus_dolphin` prefix could never
# resolve, which silently disabled every Nautilus-vs-reference comparison.
try:
    from nautilus_dolphin.nautilus.adaptive_circuit_breaker import (
        AdaptiveCircuitBreaker as NautilusACB,
        ACBConfig as NautilusACBConfig
    )
    NAUTILUS_AVAILABLE = True
except ImportError as e:
    print(f"Warning: Nautilus ACB not available: {e}")
    NAUTILUS_AVAILABLE = False
    # Inert placeholders keep module-level references resolvable; the test
    # classes guard on NAUTILUS_AVAILABLE and skip when it is False.
    class NautilusACB:
        pass

    class NautilusACBConfig:
        pass
# ============================================================================
# TEST CASES
# ============================================================================
class TestACBIdentity(unittest.TestCase):
    """
    Verify Nautilus ACB produces IDENTICAL results to reference.

    Each test case builds one dict of external factors, runs it through
    both implementations via ``_compare_results``, and additionally pins
    the reference output to the hand-computed expectation for that
    scenario. Comparison tests skip when Nautilus is unavailable.
    """
    @classmethod
    def setUpClass(cls):
        """Set up both implementations."""
        cls.reference = ReferenceAdaptiveCircuitBreaker()
        if NAUTILUS_AVAILABLE:
            cls.nautilus = NautilusACB()
        else:
            # None signals _compare_results to skip the comparison tests.
            cls.nautilus = None

    def _compare_results(self, factors: Dict, test_name: str):
        """Compare reference vs Nautilus results.

        Returns (ref_result, naut_result); skips the running test when
        the Nautilus implementation is not importable.
        """
        # Get reference result
        ref_result = self.reference.get_cut_for_factors(factors)
        # Get Nautilus result (if available)
        if self.nautilus is None:
            self.skipTest("Nautilus ACB not available")
        # Mock the external factor loading so get_cut_for_date evaluates
        # exactly the factors under test; the date string is then arbitrary.
        self.nautilus._load_external_factors = lambda date: factors
        naut_result = self.nautilus.get_cut_for_date('2026-02-06')
        # Compare cut rates (MUST be identical)
        self.assertAlmostEqual(
            ref_result['cut'], naut_result['cut'],
            places=6,
            msg=f"{test_name}: Cut rate mismatch - "
            f"Ref: {ref_result['cut']}, Naut: {naut_result['cut']}"
        )
        # Compare signals (MUST be identical)
        self.assertAlmostEqual(
            ref_result['signals'], naut_result['signals'],
            places=6,
            msg=f"{test_name}: Signal count mismatch - "
            f"Ref: {ref_result['signals']}, Naut: {naut_result['signals']}"
        )
        return ref_result, naut_result

    # -------------------------------------------------------------------------
    # Test Case 1: No Stress (0 signals)
    # -------------------------------------------------------------------------
    def test_no_stress_all_normal(self):
        """Test normal market conditions - 0 signals expected."""
        factors = {
            'funding_btc': 0.0001,  # Positive (bullish)
            'dvol_btc': 40.0,  # Low volatility
            'fng': 60,  # Greed
            'taker': 1.1  # Buying pressure
        }
        ref, naut = self._compare_results(factors, "No Stress")
        self.assertEqual(ref['signals'], 0.0)
        self.assertEqual(ref['cut'], 0.0)
        print(f" No Stress: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    # -------------------------------------------------------------------------
    # Test Case 2: Single Signal Variations
    # -------------------------------------------------------------------------
    def test_funding_stress_only(self):
        """Test funding stress only - 1 signal expected."""
        factors = {
            'funding_btc': -0.00015,  # Very bearish
            'dvol_btc': 40.0,
            'fng': 60,
            'taker': 1.1
        }
        ref, naut = self._compare_results(factors, "Funding Stress")
        self.assertEqual(ref['signals'], 1.0)
        self.assertEqual(ref['cut'], 0.15)
        print(f" Funding Stress: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    def test_dvol_stress_only(self):
        """Test DVOL stress only - 1 signal expected."""
        factors = {
            'funding_btc': 0.0001,
            'dvol_btc': 85.0,  # Extreme volatility
            'fng': 60,
            'taker': 1.1
        }
        ref, naut = self._compare_results(factors, "DVOL Stress")
        self.assertEqual(ref['signals'], 1.0)
        self.assertEqual(ref['cut'], 0.15)
        print(f" DVOL Stress: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    def test_taker_stress_only(self):
        """Test taker stress only - 1 signal expected."""
        factors = {
            'funding_btc': 0.0001,
            'dvol_btc': 40.0,
            'fng': 60,
            'taker': 0.75  # Selling pressure
        }
        ref, naut = self._compare_results(factors, "Taker Stress")
        self.assertEqual(ref['signals'], 1.0)
        self.assertEqual(ref['cut'], 0.15)
        print(f" Taker Stress: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    # -------------------------------------------------------------------------
    # Test Case 3: FNG Confirmation Logic
    # -------------------------------------------------------------------------
    def test_fng_no_confirmation(self):
        """Test FNG without confirmation - should NOT count."""
        factors = {
            'funding_btc': 0.0001,  # No other signals
            'dvol_btc': 40.0,
            'fng': 20,  # Extreme fear (but no confirmation)
            'taker': 1.1
        }
        ref, naut = self._compare_results(factors, "FNG No Confirmation")
        # FNG requires confirmation, so should be 0 signals
        self.assertEqual(ref['signals'], 0.0)
        self.assertEqual(ref['cut'], 0.0)
        print(f" FNG No Conf: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    def test_fng_with_confirmation(self):
        """Test FNG WITH confirmation - requires signals >= 1 from other factors."""
        factors = {
            'funding_btc': -0.00015,  # Strong funding signal (1.0) to confirm FNG
            'dvol_btc': 40.0,
            'fng': 20,  # Extreme fear (confirmed by funding)
            'taker': 1.1
        }
        ref, naut = self._compare_results(factors, "FNG With Confirmation")
        # 1.0 from funding + 1.0 from confirmed FNG = 2.0 signals
        self.assertEqual(ref['signals'], 2.0)
        self.assertEqual(ref['cut'], 0.45)  # 2 signals = 45% cut
        print(f" FNG With Conf: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    # -------------------------------------------------------------------------
    # Test Case 4: Two Signals (45% Cut)
    # -------------------------------------------------------------------------
    def test_two_signals_funding_dvol(self):
        """Test funding + DVOL stress - 2 signals expected."""
        factors = {
            'funding_btc': -0.00015,  # Very bearish (1.0)
            'dvol_btc': 85.0,  # Extreme (1.0)
            'fng': 60,
            'taker': 1.1
        }
        ref, naut = self._compare_results(factors, "Two Signals (Funding+DVOL)")
        self.assertEqual(ref['signals'], 2.0)
        self.assertEqual(ref['cut'], 0.45)
        print(f" 2 Signals: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    def test_two_signals_funding_taker(self):
        """Test funding + taker stress - 2 signals expected."""
        factors = {
            'funding_btc': -0.00015,  # Very bearish (1.0)
            'dvol_btc': 40.0,
            'fng': 60,
            'taker': 0.75  # Selling (1.0)
        }
        ref, naut = self._compare_results(factors, "Two Signals (Funding+Taker)")
        self.assertEqual(ref['signals'], 2.0)
        self.assertEqual(ref['cut'], 0.45)
        print(f" 2 Signals: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    # -------------------------------------------------------------------------
    # Test Case 5: Three Signals (55% Cut - Crash Level)
    # -------------------------------------------------------------------------
    def test_three_signals_feb6_scenario(self):
        """
        Test Feb 6, 2026 scenario - THE CRASH DAY.
        Actual values from Feb 6:
        - Funding: -0.000137 (very bearish)
        - DVOL: 58.9 (elevated)
        - FNG: 14 (extreme fear - confirmed)
        - Taker: ~0.85 (mild selling)
        Expected: 3 signals, 55% cut
        """
        factors = {
            'funding_btc': -0.000137,  # Very bearish (1.0)
            'dvol_btc': 58.9,  # Elevated (0.5)
            'fng': 14,  # Extreme fear, confirmed (1.0)
            'taker': 0.85  # Mild selling (0.5)
        }
        ref, naut = self._compare_results(factors, "Feb 6 Crash Scenario")
        # 1.0 + 0.5 + 1.0 + 0.5 = 3.0 signals
        self.assertEqual(ref['signals'], 3.0)
        self.assertEqual(ref['cut'], 0.55)
        print(f" Feb 6: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    # -------------------------------------------------------------------------
    # Test Case 6: Four+ Signals (75-80% Cut)
    # -------------------------------------------------------------------------
    def test_four_signals_extreme(self):
        """Test extreme stress - 4 signals expected."""
        factors = {
            'funding_btc': -0.0002,  # Very bearish (1.0)
            'dvol_btc': 95.0,  # Extreme (1.0)
            'fng': 10,  # Extreme fear, confirmed (1.0)
            'taker': 0.7  # Strong selling (1.0)
        }
        ref, naut = self._compare_results(factors, "Four Signals (Extreme)")
        self.assertEqual(ref['signals'], 4.0)
        self.assertEqual(ref['cut'], 0.75)
        print(f" 4 Signals: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    def test_five_signals_apocalypse(self):
        """Test apocalyptic stress - 5+ signals expected."""
        # Add even more extreme conditions
        factors = {
            'funding_btc': -0.0003,  # Extremely bearish (1.0)
            'dvol_btc': 100.0,  # Max volatility (1.0)
            'fng': 5,  # Max fear, confirmed (1.0)
            'taker': 0.6  # Extreme selling (1.0)
        }
        ref, naut = self._compare_results(factors, "Five+ Signals (Apocalypse)")
        # Four full factors yield 4.0; allow either 75% or 80% bucket.
        self.assertGreaterEqual(ref['signals'], 4.0)
        self.assertIn(ref['cut'], [0.75, 0.80])
        print(f" 5+ Signals: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    # -------------------------------------------------------------------------
    # Test Case 7: Edge Cases
    # -------------------------------------------------------------------------
    def test_boundary_funding_exact(self):
        """Test exact funding boundary (-0.0001)."""
        factors = {
            'funding_btc': -0.0001,  # Exactly at threshold
            'dvol_btc': 40.0,
            'fng': 60,
            'taker': 1.1
        }
        ref, naut = self._compare_results(factors, "Funding Boundary (-0.0001)")
        # -0.0001 is the threshold, should trigger 1 signal
        # NOTE(review): funding == FUNDING_VERY_BEARISH is not < it, so the
        # 1.0 here comes from the strict `< FUNDING_BEARISH` half-signal
        # path plus... verify against the reference if this ever fails.
        self.assertEqual(ref['signals'], 1.0)
        self.assertEqual(ref['cut'], 0.15)
        print(f" Boundary: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    def test_boundary_dvol_exact(self):
        """Test exact DVOL boundary (55 and 80)."""
        # At 55 threshold
        factors = {
            'funding_btc': 0.0001,
            'dvol_btc': 55.0,  # Exactly at elevated threshold
            'fng': 60,
            'taker': 1.1
        }
        ref, naut = self._compare_results(factors, "DVOL Boundary (55)")
        # 55 is elevated threshold, should trigger 0.5 signal
        self.assertEqual(ref['signals'], 0.5)
        print(f" DVOL 55: signals={ref['signals']:.1f} - MATCH")
        # At 80 threshold
        factors['dvol_btc'] = 80.0  # Exactly at extreme threshold
        ref, naut = self._compare_results(factors, "DVOL Boundary (80)")
        # 80 is extreme threshold, should trigger 1.0 signal
        self.assertEqual(ref['signals'], 1.0)
        self.assertEqual(ref['cut'], 0.15)
        print(f" DVOL 80: signals={ref['signals']:.1f}, cut={ref['cut']*100:.0f}% - MATCH")

    def test_signal_thresholds_exact(self):
        """Test exact signal count thresholds (1.0, 2.0, 3.0, 4.0, 5.0)."""
        test_cases = [
            (1.0, 0.15, "Exactly 1 signal"),
            (2.0, 0.45, "Exactly 2 signals"),
            (3.0, 0.55, "Exactly 3 signals"),
            (4.0, 0.75, "Exactly 4 signals"),
            (5.0, 0.80, "Exactly 5 signals"),
        ]
        for signals, expected_cut, description in test_cases:
            ref_cut = self.reference.get_cut_from_signals(signals)
            # Only compare against Nautilus when it is importable.
            if self.nautilus:
                naut_cut = self.nautilus._get_cut_from_signals(signals)
                self.assertEqual(ref_cut, naut_cut,
                    f"{description}: Ref={ref_cut}, Naut={naut_cut}")
            self.assertEqual(ref_cut, expected_cut,
                f"{description}: Expected {expected_cut}, got {ref_cut}")
            print(f" {description}: cut={ref_cut*100:.0f}% - MATCH")
class TestACBPositionSizing(unittest.TestCase):
"""Test position sizing with ACB."""
@classmethod
def setUpClass(cls):
"""Set up implementations."""
cls.reference = ReferenceAdaptiveCircuitBreaker()
def test_position_sizing_calculation(self):
"""Test that position sizing math is correct."""
base_size = 1000.0
test_cases = [
(0.0, 1000.0, "0% cut"),
(0.15, 850.0, "15% cut"),
(0.45, 550.0, "45% cut"),
(0.55, 450.0, "55% cut"),
(0.75, 250.0, "75% cut"),
(0.80, 200.0, "80% cut"),
]
for cut, expected_size, description in test_cases:
actual_size = base_size * (1 - cut)
self.assertAlmostEqual(
actual_size, expected_size,
places=2,
msg=f"{description}: Expected ${expected_size}, got ${actual_size}"
)
print(f" {description}: ${base_size} * (1 - {cut}) = ${actual_size} - MATCH")
class TestConfigurationIdentity(unittest.TestCase):
    """Test that configurations are identical."""

    def test_cut_rates_identical(self):
        """Verify cut rates are identical between implementations."""
        ref_config = ReferenceACBConfig()
        if not NAUTILUS_AVAILABLE:
            self.skipTest("Nautilus not available")
        naut_config = NautilusACBConfig()
        # Every reference bucket must exist on the Nautilus side with
        # exactly the same cut value.
        for signals in ref_config.CUT_RATES:
            ref_cut = ref_config.CUT_RATES[signals]
            naut_cut = naut_config.CUT_RATES.get(signals)
            self.assertIsNotNone(naut_cut, f"Nautilus missing cut rate for {signals} signals")
            self.assertEqual(ref_cut, naut_cut,
                f"Cut rate mismatch at {signals} signals: Ref={ref_cut}, Naut={naut_cut}")
            print(f" {signals} signals: Ref={ref_cut}, Naut={naut_cut} - MATCH")

    def test_thresholds_identical(self):
        """Verify thresholds are identical."""
        ref_config = ReferenceACBConfig()
        if not NAUTILUS_AVAILABLE:
            self.skipTest("Nautilus not available")
        naut_config = NautilusACBConfig()
        # Both configs expose the thresholds under the same attribute names.
        threshold_names = (
            'FUNDING_VERY_BEARISH',
            'FUNDING_BEARISH',
            'DVOL_EXTREME',
            'DVOL_ELEVATED',
            'FNG_EXTREME_FEAR',
            'FNG_FEAR',
            'TAKER_SELLING',
            'TAKER_MILD_SELLING',
        )
        for name in threshold_names:
            ref_val = getattr(ref_config, name)
            naut_val = getattr(naut_config, name)
            self.assertEqual(ref_val, naut_val,
                f"Threshold mismatch for {name}: Ref={ref_val}, Naut={naut_val}")
            print(f" {name}: Ref={ref_val}, Naut={naut_val} - MATCH")
def run_identity_tests():
    """Run all identity tests and print summary.

    Returns True when every test passed (skips do not count as failures).
    """
    banner = "=" * 80
    print(banner)
    print("ACB NAUTILUS vs REFERENCE - IDENTITY TEST SUITE")
    print(banner)
    print()

    # Assemble every identity TestCase into a single suite.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case in (TestACBIdentity, TestACBPositionSizing, TestConfigurationIdentity):
        suite.addTests(loader.loadTestsFromTestCase(case))

    result = unittest.TextTestRunner(verbosity=2).run(suite)

    print()
    print(banner)
    print("TEST SUMMARY")
    print(banner)
    print(f"Tests Run: {result.testsRun}")
    print(f"Failures: {len(result.failures)}")
    print(f"Errors: {len(result.errors)}")
    print(f"Skipped: {len(result.skipped)}")

    if result.wasSuccessful():
        print()
        print("✓ ALL TESTS PASSED - NAUTILUS ACB PRODUCES IDENTICAL RESULTS TO REFERENCE")
        print()
        print("The Nautilus-Dolphin ACB implementation is verified to be:")
        print(" - Mathematically identical to the tested reference")
        print(" - Producing the same cut rates for all signal combinations")
        print(" - Using the same thresholds and logic")
        print(" - Safe for production deployment")
    else:
        print()
        print("✗ TESTS FAILED - REVIEW IMPLEMENTATION")
        print()
        if result.failures:
            print("Failures:")
            for test, trace in result.failures:
                print(f" - {test}")
        if result.errors:
            print("Errors:")
            for test, trace in result.errors:
                print(f" - {test}")
    return result.wasSuccessful()


if __name__ == '__main__':
    # Exit 0 on success, 1 on any failure/error for CI consumption.
    sys.exit(0 if run_identity_tests() else 1)

View File

@@ -0,0 +1,407 @@
"""
ACB Standalone Identity Test
============================
Tests the ACB implementation WITHOUT requiring Nautilus Trader.
Directly imports the adaptive_circuit_breaker module.
"""
import unittest
import sys
from pathlib import Path
from dataclasses import dataclass
from typing import Dict
# Add parent to path
sys.path.insert(0, str(Path(__file__).parent.parent))
# ============================================================================
# REFERENCE IMPLEMENTATION (THE GROUND TRUTH)
# ============================================================================
@dataclass
class ReferenceACBConfig:
    """Reference configuration."""

    # Integer signal bucket -> fractional position-size cut.
    CUT_RATES = {0: 0.00, 1: 0.15, 2: 0.45, 3: 0.55, 4: 0.75, 5: 0.80}

    # Funding-rate thresholds.
    FUNDING_VERY_BEARISH = -0.0001
    FUNDING_BEARISH = 0.0
    # Volatility (DVOL) thresholds.
    DVOL_EXTREME = 80
    DVOL_ELEVATED = 55
    # Fear & Greed index thresholds.
    FNG_EXTREME_FEAR = 25
    FNG_FEAR = 40
    # Taker buy/sell ratio thresholds.
    TAKER_SELLING = 0.8
    TAKER_MILD_SELLING = 0.9
class ReferenceACB:
    """Reference implementation."""

    def __init__(self):
        self.config = ReferenceACBConfig()

    def calculate_signals(self, factors: Dict) -> Dict:
        """Score the four external factors into {'signals', 'severity'}."""
        cfg = self.config
        total = 0.0
        severity = 0

        # Funding: full signal when very bearish, half when mildly bearish.
        funding = factors.get('funding_btc', 0)
        if funding < cfg.FUNDING_VERY_BEARISH:
            total += 1.0
            severity += 2
        elif funding < cfg.FUNDING_BEARISH:
            total += 0.5
            severity += 1

        # Volatility.
        dvol = factors.get('dvol_btc', 50)
        if dvol > cfg.DVOL_EXTREME:
            total += 1.0
            severity += 2
        elif dvol > cfg.DVOL_ELEVATED:
            total += 0.5
            severity += 1

        # Fear & Greed counts only when other factors already fired.
        fng = factors.get('fng', 50)
        if fng < cfg.FNG_EXTREME_FEAR:
            if total >= 1:
                total += 1.0
                severity += 2
        elif fng < cfg.FNG_FEAR:
            if total >= 0.5:
                total += 0.5
                severity += 1

        # Taker flow: mild selling adds signal weight but no severity.
        taker = factors.get('taker', 1.0)
        if taker < cfg.TAKER_SELLING:
            total += 1.0
            severity += 1
        elif taker < cfg.TAKER_MILD_SELLING:
            total += 0.5

        return {'signals': total, 'severity': severity}

    def get_cut_from_signals(self, signals: float) -> float:
        """Map a signal count onto the discrete cut ladder."""
        for threshold in (5, 4, 3, 2, 1):
            if signals >= float(threshold):
                return self.config.CUT_RATES[threshold]
        return self.config.CUT_RATES[0]
# ============================================================================
# IMPORT NAUTILUS ACB DIRECTLY (bypass __init__.py)
# ============================================================================
# Import the ACB module directly without going through the package __init__.py
import importlib.util

# Load the ACB module by file path, bypassing the package __init__.py so
# this test runs without the package's Nautilus-dependent imports.
acb_module_path = Path(__file__).parent.parent / 'nautilus_dolphin' / 'nautilus' / 'adaptive_circuit_breaker.py'
if not acb_module_path.is_file():
    # Fail fast with the offending path instead of the less obvious error
    # exec_module would raise when the file is absent.
    raise FileNotFoundError(f"ACB module not found at: {acb_module_path}")
spec = importlib.util.spec_from_file_location("adaptive_circuit_breaker", acb_module_path)
if spec is None or spec.loader is None:
    # Guard the None case so we don't die with a bare AttributeError.
    raise ImportError(f"Could not create import spec for: {acb_module_path}")
acb_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(acb_module)

# Re-export the classes under the names the test cases below expect.
NautilusACB = acb_module.AdaptiveCircuitBreaker
NautilusACBConfig = acb_module.ACBConfig
NautilusACBPositionSizer = acb_module.ACBPositionSizer
print(f"Loaded Nautilus ACB from: {acb_module_path}")
print(f"Nautilus ACB class: {NautilusACB}")
# ============================================================================
# IDENTITY TESTS
# ============================================================================
class TestACBSignalCalculation(unittest.TestCase):
"""Test signal calculation identity."""
@classmethod
def setUpClass(cls):
cls.reference = ReferenceACB()
cls.nautilus = NautilusACB()
def assert_signals_equal(self, factors, test_name):
"""Assert signals are equal between implementations."""
ref_result = self.reference.calculate_signals(factors)
naut_result = self.nautilus._calculate_signals(factors)
self.assertAlmostEqual(
ref_result['signals'], naut_result['signals'],
places=6, msg=f"{test_name}: signals mismatch"
)
self.assertEqual(
ref_result['severity'], naut_result['severity'],
msg=f"{test_name}: severity mismatch"
)
return ref_result
def test_1_no_stress(self):
"""No stress: 0 signals."""
factors = {'funding_btc': 0.0001, 'dvol_btc': 40.0, 'fng': 60, 'taker': 1.1}
result = self.assert_signals_equal(factors, "No Stress")
self.assertEqual(result['signals'], 0.0)
print("[PASS] No stress: 0 signals")
def test_2_funding_stress(self):
"""Funding stress only: 1 signal."""
factors = {'funding_btc': -0.00015, 'dvol_btc': 40.0, 'fng': 60, 'taker': 1.1}
result = self.assert_signals_equal(factors, "Funding Stress")
self.assertEqual(result['signals'], 1.0)
print("[PASS] Funding stress: 1 signal")
def test_3_dvol_stress(self):
"""DVOL stress only: 1 signal."""
factors = {'funding_btc': 0.0001, 'dvol_btc': 85.0, 'fng': 60, 'taker': 1.1}
result = self.assert_signals_equal(factors, "DVOL Stress")
self.assertEqual(result['signals'], 1.0)
print("[PASS] DVOL stress: 1 signal")
def test_4_fng_no_confirmation(self):
"""FNG without confirmation: 0 signals."""
factors = {'funding_btc': 0.0001, 'dvol_btc': 40.0, 'fng': 20, 'taker': 1.1}
result = self.assert_signals_equal(factors, "FNG No Conf")
self.assertEqual(result['signals'], 0.0)
print("[PASS] FNG no confirmation: 0 signals")
def test_5_fng_with_confirmation(self):
"""FNG with confirmation: 1.5 signals (requires signals >= 1 from other factors)."""
# Need strong funding signal (1.0) to confirm FNG extreme fear (1.0)
factors = {'funding_btc': -0.00015, 'dvol_btc': 40.0, 'fng': 20, 'taker': 1.1}
result = self.assert_signals_equal(factors, "FNG With Conf")
self.assertEqual(result['signals'], 2.0) # 1.0 funding + 1.0 FNG confirmed
print("[PASS] FNG with confirmation: 2.0 signals")
def test_6_two_signals(self):
"""Two signals: 2.0."""
factors = {'funding_btc': -0.00015, 'dvol_btc': 85.0, 'fng': 60, 'taker': 1.1}
result = self.assert_signals_equal(factors, "Two Signals")
self.assertEqual(result['signals'], 2.0)
print("[PASS] Two signals: 2.0")
def test_7_feb6_scenario(self):
"""Feb 6 crash scenario: 3 signals."""
factors = {
'funding_btc': -0.000137,
'dvol_btc': 58.9,
'fng': 14,
'taker': 0.85
}
result = self.assert_signals_equal(factors, "Feb 6 Scenario")
self.assertEqual(result['signals'], 3.0)
print("[PASS] Feb 6 scenario: 3 signals")
def test_8_four_signals(self):
    """Four signals: 4.0 (all factors simultaneously stressed)."""
    inputs = {
        'funding_btc': -0.0002,
        'dvol_btc': 95.0,
        'fng': 10,
        'taker': 0.7,
    }
    outcome = self.assert_signals_equal(inputs, "Four Signals")
    self.assertEqual(4.0, outcome['signals'])
    print("[PASS] Four signals: 4.0")
class TestACBCutMapping(unittest.TestCase):
    """Test cut rate mapping identity."""

    @classmethod
    def setUpClass(cls):
        # One instance of each implementation, shared across all mapping tests.
        cls.reference = ReferenceACB()
        cls.nautilus = NautilusACB()

    def assert_cut_equal(self, signals, expected_cut, test_name):
        """Assert both implementations map `signals` to the same, expected cut."""
        ref_cut = self.reference.get_cut_from_signals(signals)
        naut_cut = self.nautilus._get_cut_from_signals(signals)
        # The two implementations must agree with each other...
        self.assertEqual(ref_cut, naut_cut,
            f"{test_name}: Ref={ref_cut}, Naut={naut_cut}"
        )
        # ...and with the expected v5 ladder value.
        self.assertEqual(ref_cut, expected_cut,
            f"{test_name}: Expected {expected_cut}, got {ref_cut}"
        )
        print(f"[PASS] {test_name}: {signals} signals -> {ref_cut*100:.0f}%")

    def test_cut_0_signals(self):
        self.assert_cut_equal(signals=0.0, expected_cut=0.00, test_name="0 signals")

    def test_cut_0_5_signals(self):
        self.assert_cut_equal(signals=0.5, expected_cut=0.00, test_name="0.5 signals")

    def test_cut_1_signal(self):
        self.assert_cut_equal(signals=1.0, expected_cut=0.15, test_name="1 signal")

    def test_cut_1_5_signals(self):
        self.assert_cut_equal(signals=1.5, expected_cut=0.15, test_name="1.5 signals")

    def test_cut_2_signals(self):
        self.assert_cut_equal(signals=2.0, expected_cut=0.45, test_name="2 signals")

    def test_cut_2_5_signals(self):
        self.assert_cut_equal(signals=2.5, expected_cut=0.45, test_name="2.5 signals")

    def test_cut_3_signals(self):
        self.assert_cut_equal(signals=3.0, expected_cut=0.55, test_name="3 signals")

    def test_cut_4_signals(self):
        self.assert_cut_equal(signals=4.0, expected_cut=0.75, test_name="4 signals")

    def test_cut_5_signals(self):
        self.assert_cut_equal(signals=5.0, expected_cut=0.80, test_name="5 signals")
class TestACBConfiguration(unittest.TestCase):
    """Test configuration identity."""

    def test_cut_rates_identical(self):
        """Verify cut rates are identical."""
        expected_cfg = ReferenceACBConfig()
        actual_cfg = NautilusACBConfig()
        for signals, ref_cut in expected_cfg.CUT_RATES.items():
            naut_cut = actual_cfg.CUT_RATES[signals]
            self.assertEqual(ref_cut, naut_cut,
                f"Cut rate mismatch at {signals}: Ref={ref_cut}, Naut={naut_cut}"
            )
            print(f"[PASS] Cut rate {signals}: {ref_cut} = {naut_cut}")

    def test_thresholds_identical(self):
        """Verify thresholds are identical."""
        expected_cfg = ReferenceACBConfig()
        actual_cfg = NautilusACBConfig()
        # Both configs expose the same attribute names, so one tuple suffices.
        for ref_attr in ('FUNDING_VERY_BEARISH', 'DVOL_EXTREME',
                         'FNG_EXTREME_FEAR', 'TAKER_SELLING'):
            ref_val = getattr(expected_cfg, ref_attr)
            naut_val = getattr(actual_cfg, ref_attr)
            self.assertEqual(ref_val, naut_val,
                f"{ref_attr}: Ref={ref_val}, Naut={naut_val}"
            )
            print(f"[PASS] Threshold {ref_attr}: {ref_val} = {naut_val}")
class TestACBIntegration(unittest.TestCase):
    """Test full integration: factors -> signals -> cut, through both stacks."""

    @classmethod
    def setUpClass(cls):
        cls.reference = ReferenceACB()
        cls.nautilus = NautilusACB()

    def _run_both(self, factors):
        """Run the full pipeline on reference and Nautilus implementations."""
        ref_sig = self.reference.calculate_signals(factors)
        ref_cut = self.reference.get_cut_from_signals(ref_sig['signals'])
        naut_sig = self.nautilus._calculate_signals(factors)
        naut_cut = self.nautilus._get_cut_from_signals(naut_sig['signals'])
        return ref_sig, ref_cut, naut_sig, naut_cut

    def test_end_to_end_no_stress(self):
        """End-to-end: no stress."""
        factors = {'funding_btc': 0.0001, 'dvol_btc': 40.0, 'fng': 60, 'taker': 1.1}
        ref_sig, ref_cut, naut_sig, naut_cut = self._run_both(factors)
        self.assertEqual(ref_sig['signals'], naut_sig['signals'])
        self.assertEqual(ref_cut, naut_cut)
        self.assertEqual(ref_cut, 0.0)
        print("[PASS] E2E no stress: 0% cut")

    def test_end_to_end_feb6(self):
        """End-to-end: Feb 6 crash."""
        factors = {
            'funding_btc': -0.000137,
            'dvol_btc': 58.9,
            'fng': 14,
            'taker': 0.85,
        }
        ref_sig, ref_cut, naut_sig, naut_cut = self._run_both(factors)
        self.assertEqual(ref_sig['signals'], naut_sig['signals'])
        self.assertEqual(ref_cut, naut_cut)
        self.assertEqual(ref_cut, 0.55)
        print("[PASS] E2E Feb 6: 55% cut")
class TestPositionSizer(unittest.TestCase):
    """Test the ACB position sizer (with the ACB backend mocked)."""

    def test_sizer_creation(self):
        """Test that position sizer can be created."""
        sizer = NautilusACBPositionSizer()
        self.assertIsNotNone(sizer)
        self.assertTrue(sizer.is_enabled())
        print("[PASS] Position sizer created")

    def test_sizer_calculation(self):
        """Position sizing must apply base * (1 - cut) for each ACB cut level.

        The previous version of this test only re-derived the arithmetic on
        literal values and never invoked the sizer, so the sizer's own code
        path was untested. It now mocks the ACB backend to return each known
        cut and calls calculate_size() directly.
        """
        # Local import: keeps this fix self-contained regardless of this
        # module's top-of-file imports.
        from unittest.mock import patch

        sizer = NautilusACBPositionSizer()
        test_cases = [
            (0.0, 1000.0, 1000.0),   # no stress  -> full size
            (0.15, 1000.0, 850.0),   # 1 signal   -> 15% cut
            (0.45, 1000.0, 550.0),   # 2 signals  -> 45% cut
            (0.55, 1000.0, 450.0),   # 3 signals  -> 55% cut
        ]
        for cut, base, expected in test_cases:
            with patch.object(sizer.acb, 'get_cut_for_date') as mock_get:
                mock_get.return_value = {'cut': cut, 'signals': 0.0, 'factors': {}}
                final_size, info = sizer.calculate_size(base, '2026-02-06')
                self.assertAlmostEqual(final_size, expected, places=2)
        print("[PASS] Position sizing calculations correct")
def run_tests():
    """Run all identity tests, print a summary banner, return True on success."""
    print("=" * 80)
    print("ACB NAUTILUS vs REFERENCE - STANDALONE IDENTITY TEST")
    print("=" * 80)
    print()

    # Suite order matters only for readability of the verbose output.
    case_classes = (
        TestACBSignalCalculation,
        TestACBCutMapping,
        TestACBConfiguration,
        TestACBIntegration,
        TestPositionSizer,
    )
    loader = unittest.TestLoader()
    suite = unittest.TestSuite(
        loader.loadTestsFromTestCase(case) for case in case_classes
    )

    result = unittest.TextTestRunner(verbosity=2).run(suite)

    print()
    print("=" * 80)
    print("TEST SUMMARY")
    print("=" * 80)
    print(f"Tests Run: {result.testsRun}")
    print(f"Failures: {len(result.failures)}")
    print(f"Errors: {len(result.errors)}")

    if not result.wasSuccessful():
        print()
        print("[FAILURE] TESTS FAILED")
        return False

    print()
    print("[SUCCESS] ALL TESTS PASSED")
    print()
    print("The Nautilus ACB implementation is VERIFIED to be:")
    print(" * Mathematically identical to the reference")
    print(" * Producing the same signal calculations")
    print(" * Using the same cut rate mappings")
    print(" * Using identical configuration thresholds")
    print(" * Safe for production deployment")
    return True
if __name__ == '__main__':
    # Exit code mirrors the overall test outcome (0 = all passed).
    sys.exit(0 if run_tests() else 1)

View File

@@ -0,0 +1,271 @@
"""
Tests for Adaptive Circuit Breaker v5.
"""
import unittest
from datetime import datetime
from unittest.mock import patch, MagicMock
import numpy as np
from nautilus_dolphin.nautilus.adaptive_circuit_breaker import (
AdaptiveCircuitBreaker, ACBConfig, ACBPositionSizer, get_acb_cut_for_date
)
class TestACBConfig(unittest.TestCase):
    """Test ACB configuration."""

    def test_default_config(self):
        """Default v5 cut-rate ladder and stress thresholds."""
        config = ACBConfig()

        # v5 ladder: signal count -> fraction of position to cut.
        expected_cuts = {0: 0.00, 1: 0.15, 2: 0.45, 3: 0.55, 4: 0.75, 5: 0.80}
        for signals, cut in expected_cuts.items():
            self.assertEqual(config.CUT_RATES[signals], cut)

        # Stress-detection thresholds.
        self.assertEqual(config.FUNDING_VERY_BEARISH, -0.0001)
        self.assertEqual(config.DVOL_EXTREME, 80)
        self.assertEqual(config.FNG_EXTREME_FEAR, 25)
class TestAdaptiveCircuitBreaker(unittest.TestCase):
    """Test Adaptive Circuit Breaker functionality.

    Exercises signal counting, the signal->cut ladder, position-size cuts,
    per-date result caching, and call statistics. External factor loading is
    mocked throughout, so no data files are required to run these tests.
    """

    def setUp(self):
        """Set up test fixtures: a fresh ACB per test (empty cache and stats)."""
        self.acb = AdaptiveCircuitBreaker()

    def test_signal_calculation_no_stress(self):
        """Test signal calculation with no stress factors."""
        factors = {
            'funding_btc': 0.0001,  # Positive funding (bullish)
            'dvol_btc': 40.0,       # Low volatility
            'fng': 60,              # Greed (bullish)
            'taker': 1.1            # Buying pressure
        }
        result = self.acb._calculate_signals(factors)
        # Benign factors -> zero signals and zero severity.
        self.assertEqual(result['signals'], 0.0)
        self.assertEqual(result['severity'], 0)

    def test_signal_calculation_funding_only(self):
        """Test signal calculation with funding stress only."""
        factors = {
            'funding_btc': -0.00015,  # Very bearish funding
            'dvol_btc': 40.0,
            'fng': 60,
            'taker': 1.1
        }
        result = self.acb._calculate_signals(factors)
        self.assertEqual(result['signals'], 1.0)  # 1 signal from funding
        self.assertEqual(result['severity'], 2)

    def test_signal_calculation_multiple_stress(self):
        """Test signal calculation with multiple stress factors."""
        factors = {
            'funding_btc': -0.00015,  # Very bearish funding (1.0 signals, sev 2)
            'dvol_btc': 85.0,         # Extreme volatility (1.0 signals, sev 2)
            'fng': 20,                # Extreme fear (confirmed, 1.0 signals, sev 2)
            'taker': 0.75             # Selling pressure (1.0 signals, sev 1)
        }
        result = self.acb._calculate_signals(factors)
        # Should have 4 signals (funding + dvol + fng + taker)
        self.assertGreaterEqual(result['signals'], 3.0)
        self.assertGreater(result['severity'], 0)

    def test_cut_mapping(self):
        """Test signal to cut rate mapping (half-signals floor to the lower rung)."""
        test_cases = [
            (0.0, 0.00),  # 0 signals -> 0% cut
            (0.5, 0.00),  # 0.5 signals -> 0% cut (below threshold)
            (1.0, 0.15),  # 1 signal -> 15% cut
            (1.5, 0.15),  # 1.5 signals -> 15% cut
            (2.0, 0.45),  # 2 signals -> 45% cut
            (2.5, 0.45),  # 2.5 signals -> 45% cut
            (3.0, 0.55),  # 3 signals -> 55% cut
            (4.0, 0.75),  # 4 signals -> 75% cut
            (5.0, 0.80),  # 5 signals -> 80% cut
        ]
        for signals, expected_cut in test_cases:
            cut = self.acb._get_cut_from_signals(signals)
            self.assertEqual(cut, expected_cut,
                f"Failed for signals={signals}: got {cut}, expected {expected_cut}")

    def test_apply_cut_to_position_size(self):
        """Test applying cut to position size: final = base * (1 - cut)."""
        base_size = 1000.0
        # Test with no stress (0% cut)
        with patch.object(self.acb, 'get_cut_for_date') as mock_get:
            mock_get.return_value = {'cut': 0.0, 'signals': 0.0}
            final_size, info = self.acb.apply_cut_to_position_size(base_size, '2026-02-06')
            self.assertEqual(final_size, base_size)
        # Test with moderate stress (45% cut)
        with patch.object(self.acb, 'get_cut_for_date') as mock_get:
            mock_get.return_value = {'cut': 0.45, 'signals': 2.0}
            final_size, info = self.acb.apply_cut_to_position_size(base_size, '2026-02-06')
            self.assertAlmostEqual(final_size, base_size * 0.55, places=10)  # 1000 * (1 - 0.45)
        # Test with extreme stress (80% cut)
        with patch.object(self.acb, 'get_cut_for_date') as mock_get:
            mock_get.return_value = {'cut': 0.80, 'signals': 5.0}
            final_size, info = self.acb.apply_cut_to_position_size(base_size, '2026-02-06')
            self.assertAlmostEqual(final_size, base_size * 0.20, places=10)  # 1000 * (1 - 0.80)

    def test_caching(self):
        """Test that results are cached (second lookup for the same date hits)."""
        date_str = '2026-02-06'
        # First call should cache
        with patch.object(self.acb, '_load_external_factors') as mock_load:
            mock_load.return_value = {
                'funding_btc': -0.0001,
                'dvol_btc': 60,
                'fng': 40,
                'taker': 0.95
            }
            result1 = self.acb.get_cut_for_date(date_str)
            self.assertEqual(self.acb._stats['total_calls'], 1)
            self.assertEqual(self.acb._stats['cache_hits'], 0)
            # Second call should use cache
            result2 = self.acb.get_cut_for_date(date_str)
            self.assertEqual(self.acb._stats['total_calls'], 2)
            self.assertEqual(self.acb._stats['cache_hits'], 1)
            # Results should be identical
            self.assertEqual(result1['cut'], result2['cut'])

    def test_stats_tracking(self):
        """Test statistics tracking across cached and uncached calls."""
        # Clear stats
        self.acb.reset_stats()
        # Simulate some calls: two distinct dates plus one repeat (cache hit).
        with patch.object(self.acb, '_load_external_factors') as mock_load:
            mock_load.return_value = {
                'funding_btc': -0.0001,
                'dvol_btc': 60,
                'fng': 40,
                'taker': 0.95
            }
            self.acb.get_cut_for_date('2026-02-06')
            self.acb.get_cut_for_date('2026-02-07')
            self.acb.get_cut_for_date('2026-02-06')  # Should be cached
        stats = self.acb.get_stats()
        self.assertEqual(stats['total_calls'], 3)
        self.assertEqual(stats['cache_hits'], 1)
        self.assertGreater(stats['cache_hit_rate'], 0)
class TestACBPositionSizer(unittest.TestCase):
    """Test ACB Position Sizer."""

    def setUp(self):
        """Fresh sizer for every test."""
        self.sizer = ACBPositionSizer()

    def test_enabled_disabled(self):
        """Toggling the ACB on/off changes sizing behaviour accordingly."""
        self.assertTrue(self.sizer.is_enabled())

        self.sizer.disable()
        self.assertFalse(self.sizer.is_enabled())

        # While disabled the sizer is a pass-through: base size unchanged.
        size, info = self.sizer.calculate_size(1000.0, '2026-02-06')
        self.assertEqual(size, 1000.0)
        self.assertFalse(info['enabled'])

        self.sizer.enable()
        self.assertTrue(self.sizer.is_enabled())

    def test_calculate_size_with_acb(self):
        """A 15% cut reported by the ACB shrinks the position by 15%."""
        notional = 1000.0
        with patch.object(self.sizer.acb, 'get_cut_for_date') as mock_get:
            mock_get.return_value = {
                'cut': 0.15,
                'signals': 1.0,
                'factors': {}
            }
            final_size, info = self.sizer.calculate_size(notional, '2026-02-06')
            self.assertAlmostEqual(final_size, notional * 0.85, places=10)  # 15% cut
            self.assertEqual(info['base_size'], notional)
            self.assertAlmostEqual(info['final_size'], final_size, places=10)
            self.assertAlmostEqual(info['reduction_pct'], 15.0, places=10)
class TestIntegration(unittest.TestCase):
    """Integration tests."""

    @staticmethod
    def _cut_for(factor_snapshot, date_str):
        """Run get_cut_for_date on a fresh ACB with mocked external factors."""
        acb = AdaptiveCircuitBreaker()
        with patch.object(acb, '_load_external_factors') as mock_load:
            mock_load.return_value = factor_snapshot
            return acb.get_cut_for_date(date_str)

    def test_feb_6_scenario(self):
        """
        Test Feb 6, 2026 scenario (actual crash day).
        Expected signals:
        - Funding: -0.000137 (very bearish)
        - DVOL: 58.9 (elevated)
        - FNG: 14 (extreme fear)
        Expected: 3+ signals -> 55% cut
        """
        result = self._cut_for(
            {
                'funding_btc': -0.000137,  # Very bearish
                'dvol_btc': 58.9,          # Elevated
                'fng': 14,                 # Extreme fear
                'taker': 0.85,             # Mild selling
                'available': True
            },
            '2026-02-06',
        )
        # Should detect 3+ signals and apply at least a severe (45%+) cut.
        self.assertGreaterEqual(result['signals'], 2.0)
        self.assertGreaterEqual(result['cut'], 0.45)

    def test_normal_day_scenario(self):
        """Test normal market day scenario."""
        result = self._cut_for(
            {
                'funding_btc': 0.00005,  # Slightly positive
                'dvol_btc': 45.0,        # Normal volatility
                'fng': 55,               # Neutral/greed
                'taker': 1.05,           # Slight buying
                'available': True
            },
            '2026-01-15',
        )
        # Should detect 0-1 signals and apply at most a 15% cut.
        self.assertLessEqual(result['signals'], 1.0)
        self.assertLessEqual(result['cut'], 0.15)
if __name__ == '__main__':
    # Allow running this module directly; pytest also discovers these tests.
    unittest.main()

View File

@@ -0,0 +1,154 @@
"""Tests for CircuitBreakerManager."""
import pytest
from datetime import datetime, timedelta
from nautilus_dolphin.nautilus.circuit_breaker import (
CircuitBreakerManager,
CircuitBreakerReason
)
class TestCircuitBreakerManager:
    """Unit tests covering each individual guard rail of CircuitBreakerManager."""

    def test_can_open_position_within_limits(self):
        """Test position can be opened when within limits."""
        breaker = CircuitBreakerManager(max_concurrent_positions=3)
        breaker._day_start = datetime.now()
        allowed, why = breaker.can_open_position("BTCUSDT", 10000.0)
        assert allowed is True
        assert why == ""

    def test_can_open_position_max_positions_reached(self):
        """Test position rejected when max positions reached."""
        breaker = CircuitBreakerManager(max_concurrent_positions=2)
        breaker._day_start = datetime.now()
        breaker._active_positions = {'pos1', 'pos2'}
        allowed, why = breaker.can_open_position("ETHUSDT", 10000.0)
        assert allowed is False
        assert "max_positions_reached" in why

    def test_can_open_position_already_exists(self):
        """Test position rejected when asset already has position."""
        breaker = CircuitBreakerManager(max_concurrent_positions=10)
        breaker._day_start = datetime.now()
        breaker._asset_positions = {'BTCUSDT': 'pos1'}
        allowed, why = breaker.can_open_position("BTCUSDT", 10000.0)
        assert allowed is False
        assert "position_already_exists" in why

    def test_daily_loss_limit_triggers_circuit_breaker(self):
        """Test circuit breaker trips on daily loss limit."""
        breaker = CircuitBreakerManager(daily_loss_limit_pct=10.0)
        breaker.on_trading_day_start(10000.0)
        # -500 - 600 = -1100 on a 10k day start: 11% loss exceeds the 10% limit.
        breaker.on_position_closed('pos1', 'BTCUSDT', -500.0)
        breaker.on_position_closed('pos2', 'ETHUSDT', -600.0)
        assert breaker.is_tripped() is True
        assert breaker._trip_reason == CircuitBreakerReason.DAILY_LOSS_LIMIT

    def test_order_size_sanity_check(self):
        """Test circuit breaker trips on oversized order."""
        breaker = CircuitBreakerManager(max_order_size_pct=50.0)
        breaker._day_start = datetime.now()
        # 6000 of a 10000 balance = 60%, above the 50% cap.
        allowed, why = breaker.can_submit_order(6000.0, 10000.0)
        assert allowed is False
        assert "order_size_sanity_check_failed" in why
        assert breaker.is_tripped() is True
        assert breaker._trip_reason == CircuitBreakerReason.ORDER_SIZE_SANITY

    def test_api_failure_tracking(self):
        """Test circuit breaker trips after consecutive API failures."""
        breaker = CircuitBreakerManager(max_api_failures=3)
        for _ in range(3):
            breaker.on_api_failure("Connection timeout")
        assert breaker.is_tripped() is True
        assert breaker._trip_reason == CircuitBreakerReason.API_FAILURE

    def test_manual_trip(self):
        """Test manual circuit breaker trip."""
        breaker = CircuitBreakerManager()
        breaker.manual_trip("Emergency stop")
        assert breaker.is_tripped() is True
        assert breaker._trip_reason == CircuitBreakerReason.MANUAL

    def test_auto_reset_after_time(self):
        """Test circuit breaker auto-resets after configured time."""
        breaker = CircuitBreakerManager(auto_reset_hours=1.0)
        breaker.manual_trip("Test trip")
        assert breaker.is_tripped() is True
        # Backdate the trip past the reset window; the next check must clear it.
        breaker._trip_time = datetime.now() - timedelta(hours=2)
        assert breaker.is_tripped() is False

    def test_get_status(self):
        """Test status report."""
        breaker = CircuitBreakerManager()
        breaker.on_trading_day_start(10000.0)
        status = breaker.get_status()
        for key in ('is_tripped', 'active_positions', 'daily_pnl'):
            assert key in status
        assert status['active_positions'] == 0
class TestCircuitBreakerIntegration:
    """End-to-end walk-through of a trading day against the breaker."""

    def test_full_trading_scenario(self):
        """Test complete trading scenario with circuit breaker."""
        breaker = CircuitBreakerManager(
            daily_loss_limit_pct=10.0,
            max_concurrent_positions=2,
            max_order_size_pct=50.0
        )
        breaker.on_trading_day_start(10000.0)

        # First position: allowed and opened.
        allowed, _ = breaker.can_open_position("BTCUSDT", 10000.0)
        assert allowed is True
        breaker.on_position_opened("pos1", "BTCUSDT")

        # Second position: still under the concurrency cap.
        allowed, _ = breaker.can_open_position("ETHUSDT", 10000.0)
        assert allowed is True
        breaker.on_position_opened("pos2", "ETHUSDT")

        # Third position: rejected (max 2 concurrent).
        allowed, why = breaker.can_open_position("SOLUSDT", 10000.0)
        assert allowed is False
        assert "max_positions_reached" in why

        # Closing a position with profit frees a slot for a new one.
        breaker.on_position_closed("pos1", "BTCUSDT", 500.0)
        allowed, _ = breaker.can_open_position("SOLUSDT", 10000.0)
        assert allowed is True

View File

@@ -0,0 +1,813 @@
"""test_dolphin_actor.py — DolphinActor lifecycle and correctness tests.
Tests cover:
- Champion parameter invariants (frozen champion config → correct defaults)
- ACB pending-flag thread safety (no lost updates, no race on clear)
- HIBERNATE posture guard (on_bar returns immediately, engine not called)
- Date change handling (begin_day / end_day transition)
- Replay mode data loading (bar_idx increments, engine step_bar called)
- HZ-unavailable graceful degradation (posture defaults APEX, no crash)
- Stale-state snapshot guard (detects mid-eval state changes)
- on_stop cleanup (processed_dates cleared, stale events reset)
All tests use unittest.mock to avoid requiring live HZ or parquet files.
Run with:
source /home/dolphin/siloqy_env/bin/activate
cd /mnt/dolphinng5_predict
python -m pytest nautilus_dolphin/tests/test_dolphin_actor.py -v
"""
import sys
import json
import threading
import time
import unittest
from datetime import datetime, timezone
from pathlib import Path
from unittest.mock import MagicMock, patch, PropertyMock
# Make the sibling `nautilus_dolphin` package importable when running from the
# repository root (tests/ -> nautilus_dolphin/ -> repo root).
HCM_DIR = Path(__file__).parent.parent.parent
sys.path.insert(0, str(HCM_DIR / "nautilus_dolphin"))
# Soft import: when the actor cannot be imported, tests are *skipped* (via the
# pytest.mark.skipif decorators below) instead of erroring at collection time.
try:
    from nautilus_dolphin.nautilus.dolphin_actor import DolphinActor
    _HAS_ACTOR = True
except ImportError as _e:
    _HAS_ACTOR = False
    _IMPORT_ERR = str(_e)  # surfaced in the skip reason
import pytest
# ── Helpers ───────────────────────────────────────────────────────────────────────
def _make_blue_config(**overrides) -> dict:
"""Minimal champion-frozen config dict (matches blue.yml)."""
cfg = {
"strategy_name": "blue",
"direction": "short_only",
"live_mode": False,
"engine": {
"boost_mode": "baseline",
"vel_div_threshold": -0.02,
"vel_div_extreme": -0.05,
"fixed_tp_pct": 0.0095,
"max_hold_bars": 120,
"fraction": 0.20,
"min_leverage": 0.5,
"max_leverage": 5.0,
"abs_max_leverage": 6.0,
"leverage_convexity": 3.0,
"dc_lookback_bars": 7,
"dc_min_magnitude_bps": 0.75,
"min_irp_alignment": 0.45,
"sp_maker_entry_rate": 0.62,
"sp_maker_exit_rate": 0.50,
"seed": 42,
# required but non-champion
"stop_pct": 1.0,
"use_direction_confirm": True,
"dc_skip_contradicts": True,
"dc_leverage_boost": 1.0,
"dc_leverage_reduce": 0.5,
"use_asset_selection": True,
"use_sp_fees": True,
"use_sp_slippage": True,
"use_ob_edge": True,
"ob_edge_bps": 5.0,
"ob_confirm_rate": 0.40,
"lookback": 100,
"use_alpha_layers": True,
"use_dynamic_leverage": True,
},
"paper_trade": {"initial_capital": 25000.0},
"hazelcast": {"imap_pnl": "DOLPHIN_PNL_BLUE"},
}
cfg.update(overrides)
return cfg
def _make_actor_no_hz() -> "DolphinActor":
    """Build a DolphinActor without touching Hazelcast.

    NOTE(review): despite the original docstring, nothing is patched or mocked
    here — the actor is simply built via ``__new__`` + an explicit ``__init__``
    call, and DolphinActor.__init__ itself leaves ``hz_client``/``engine`` as
    None (see TestChampionParamInvariants). TODO: confirm whether mocking
    Strategy.__init__ is actually needed for any Nautilus version.
    """
    actor = DolphinActor.__new__(DolphinActor)
    DolphinActor.__init__(actor, config=_make_blue_config())
    return actor
def _make_synthetic_bar(date_str: str = "2026-01-15", bar_seconds: int = 5):
"""Build a minimal Bar-like object with ts_event in nanoseconds."""
dt = datetime.strptime(date_str, "%Y-%m-%d").replace(
hour=0, minute=0, second=bar_seconds, tzinfo=timezone.utc
)
bar = MagicMock()
bar.ts_event = int(dt.timestamp() * 1e9)
return bar
# ── Test: Champion parameter invariants ──────────────────────────────────────────
@pytest.mark.skipif(not _HAS_ACTOR, reason=f"DolphinActor import failed: {locals().get('_IMPORT_ERR','')}")
class TestChampionParamInvariants(unittest.TestCase):
    """Champion params must survive round-trip through DolphinActor.__init__."""

    # Frozen champion engine parameters (must match blue.yml exactly).
    CHAMPION = {
        "vel_div_threshold": -0.02,
        "vel_div_extreme": -0.05,
        "fixed_tp_pct": 0.0095,
        "max_hold_bars": 120,
        "fraction": 0.20,
        "min_leverage": 0.5,
        "max_leverage": 5.0,
        "abs_max_leverage": 6.0,
        "leverage_convexity": 3.0,
        "dc_lookback_bars": 7,
        "dc_min_magnitude_bps": 0.75,
        "min_irp_alignment": 0.45,
        "sp_maker_entry_rate": 0.62,
        "sp_maker_exit_rate": 0.50,
        "seed": 42,
    }

    @staticmethod
    def _fresh_actor():
        """Construct an actor exactly the way the harness helpers do."""
        actor = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor, config=_make_blue_config())
        return actor

    def test_champion_config_round_trip(self):
        """Actor init stores config dict without mutation."""
        eng_cfg = self._fresh_actor().dolphin_config["engine"]
        for k, v in self.CHAMPION.items():
            self.assertAlmostEqual(eng_cfg[k], v, places=9,
                msg=f"Champion param {k} mismatch: {eng_cfg[k]} != {v}")

    def test_initial_posture_is_apex(self):
        self.assertEqual(self._fresh_actor().posture, "APEX")

    def test_initial_engine_is_none(self):
        self.assertIsNone(self._fresh_actor().engine)

    def test_initial_hz_client_is_none(self):
        self.assertIsNone(self._fresh_actor().hz_client)

    def test_pending_acb_is_none(self):
        self.assertIsNone(self._fresh_actor()._pending_acb)

    def test_acb_lock_is_rlock_compatible(self):
        """_acb_lock must be a threading.Lock (or RLock) — acquire/release must work."""
        actor = self._fresh_actor()
        acquired = actor._acb_lock.acquire(timeout=0.1)
        self.assertTrue(acquired)
        actor._acb_lock.release()
# ── Test: ACB pending-flag thread safety ─────────────────────────────────────────
@pytest.mark.skipif(not _HAS_ACTOR, reason="DolphinActor not available")
class TestACBPendingFlagThreadSafety(unittest.TestCase):
    """Verify the pending-flag pattern prevents lost ACB updates.

    The HZ listener thread stores updates into ``_pending_acb`` under
    ``_acb_lock``; on_bar consumes them under the same lock. These tests are
    deterministic: writer threads are joined before any assertion runs.
    """

    def _make_actor(self):
        # Actor with a mock engine so update_acb_boost calls can be asserted
        # without a real DOLPHIN engine behind it.
        actor = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor, config=_make_blue_config())
        # Attach a minimal mock engine that tracks update_acb_boost calls
        actor.engine = MagicMock()
        actor.engine._day_base_boost = 1.0
        actor.engine._day_beta = 0.0
        actor.engine._mc_gate_open = True
        return actor

    def test_on_acb_event_stores_pending(self):
        """_on_acb_event must set _pending_acb under the lock."""
        actor = self._make_actor()
        event = MagicMock()
        event.value = json.dumps({"boost": 1.5, "beta": 0.3})
        actor._on_acb_event(event)
        # Read back under the lock, mirroring what on_bar does.
        with actor._acb_lock:
            pending = actor._pending_acb
        self.assertIsNotNone(pending)
        self.assertAlmostEqual(pending["boost"], 1.5)
        self.assertAlmostEqual(pending["beta"], 0.3)

    def test_on_acb_event_empty_value_ignored(self):
        """Empty/None event value must not set pending."""
        actor = self._make_actor()
        event = MagicMock()
        event.value = None
        actor._on_acb_event(event)
        self.assertIsNone(actor._pending_acb)

    def test_on_acb_event_malformed_json_ignored(self):
        """Malformed JSON must be caught; pending stays None."""
        actor = self._make_actor()
        event = MagicMock()
        event.value = "{not-json"
        actor._on_acb_event(event)
        self.assertIsNone(actor._pending_acb)

    def test_concurrent_write_then_on_bar_apply(self):
        """Simulate concurrent HZ listener write + on_bar read — no lost update.

        Thread 1 writes boost=2.0 via _on_acb_event().
        Thread 2 (main) calls the on_bar ACB-apply section and asserts engine updated.
        """
        actor = self._make_actor()
        # Simulate _on_acb_event from a background thread
        def writer():
            event = MagicMock()
            event.value = json.dumps({"boost": 2.0, "beta": 0.5})
            actor._on_acb_event(event)
        t = threading.Thread(target=writer)
        t.start()
        t.join(timeout=1.0)  # joined before assertions — keeps the test deterministic
        # Simulate the ACB-apply section of on_bar(): read-and-clear under the lock.
        with actor._acb_lock:
            pending = actor._pending_acb
            actor._pending_acb = None
        self.assertIsNotNone(pending, "ACB update was lost")
        actor.engine.update_acb_boost(float(pending["boost"]), float(pending["beta"]))
        actor.engine.update_acb_boost.assert_called_once_with(2.0, 0.5)
        # After consumption, pending must be None
        with actor._acb_lock:
            self.assertIsNone(actor._pending_acb)

    def test_multiple_rapid_events_last_wins(self):
        """Multiple rapid HZ events — last write wins (expected behaviour)."""
        actor = self._make_actor()
        for boost in [1.1, 1.2, 1.3, 1.4, 1.5]:
            event = MagicMock()
            event.value = json.dumps({"boost": boost, "beta": 0.0})
            actor._on_acb_event(event)
        with actor._acb_lock:
            pending = actor._pending_acb
        self.assertAlmostEqual(pending["boost"], 1.5)
# ── Test: HIBERNATE posture guard ────────────────────────────────────────────────
@pytest.mark.skipif(not _HAS_ACTOR, reason="DolphinActor not available")
class TestHibernatePostureGuard(unittest.TestCase):
    """When posture=='HIBERNATE', on_bar must return immediately."""

    def _actor_in_posture(self, posture: str = "HIBERNATE"):
        """Actor with a mock engine and a pre-set date so no begin_day triggers."""
        actor = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor, config=_make_blue_config())
        actor.posture = posture
        actor.current_date = "2026-01-15"
        actor.engine = MagicMock()
        actor.engine._day_base_boost = 1.0
        actor.engine._day_beta = 0.0
        actor.engine._mc_gate_open = True
        actor.hz_client = None
        actor._day_data = None
        return actor

    def test_hibernate_skips_step_bar(self):
        """step_bar must not be called when posture==HIBERNATE."""
        actor = self._actor_in_posture("HIBERNATE")
        actor.on_bar(_make_synthetic_bar("2026-01-15"))
        actor.engine.step_bar.assert_not_called()

    def test_hibernate_does_not_increment_bar_idx(self):
        """_bar_idx_today must not change on HIBERNATE."""
        actor = self._actor_in_posture("HIBERNATE")
        actor._bar_idx_today = 42
        actor.on_bar(_make_synthetic_bar("2026-01-15"))
        self.assertEqual(actor._bar_idx_today, 42)

    def test_apex_does_attempt_step_bar(self):
        """APEX posture must reach the step_bar call (given valid data)."""
        import numpy as np
        import pandas as pd
        actor = self._actor_in_posture("APEX")
        zeros = np.zeros(200)
        frame = pd.DataFrame({
            "vel_div": np.full(200, -0.03),
            "BTCUSDT": np.full(200, 95000.0),
            "v50_lambda_max_velocity": zeros,
            "v750_lambda_max_velocity": zeros,
            "instability_50": zeros,
        })
        actor._day_data = (frame, ["BTCUSDT"])
        actor._bar_idx_today = 0
        actor.engine.step_bar = MagicMock(return_value={})
        actor._write_result_to_hz = MagicMock()
        actor.on_bar(_make_synthetic_bar("2026-01-15"))
        actor.engine.step_bar.assert_called_once()
# ── Test: Date change handling ────────────────────────────────────────────────────
@pytest.mark.skipif(not _HAS_ACTOR, reason="DolphinActor not available")
class TestDateChangeHandling(unittest.TestCase):
    """Verify begin_day / end_day lifecycle across date boundaries."""

    @staticmethod
    def _fresh_actor():
        """Actor with a mocked engine, no HZ, and the HZ-write stubbed out."""
        actor = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor, config=_make_blue_config())
        actor.hz_client = None
        actor.posture = "APEX"
        actor.engine = MagicMock()
        actor.engine._day_base_boost = 1.0
        actor.engine._day_beta = 0.0
        actor.engine._mc_gate_open = True
        actor._day_data = None
        actor._write_result_to_hz = MagicMock()
        return actor

    @staticmethod
    def _no_day_data(actor):
        """Patch parquet loading to an empty frame (HIBERNATE-safe, no step_bar)."""
        return patch.object(actor, "_load_parquet_data",
                            return_value=(MagicMock(empty=True), [], None))

    def test_first_bar_calls_begin_day_not_end_day(self):
        """First bar of the session must call begin_day but not end_day."""
        actor = self._fresh_actor()
        actor.current_date = None
        with self._no_day_data(actor):
            actor.on_bar(_make_synthetic_bar("2026-01-15"))
        actor.engine.begin_day.assert_called_once_with("2026-01-15", posture="APEX", direction=-1)
        actor.engine.end_day.assert_not_called()

    def test_date_change_calls_end_day_then_begin_day(self):
        """Date rollover must call end_day() on old date then begin_day() on new."""
        actor = self._fresh_actor()
        actor.current_date = "2026-01-14"
        actor.engine.end_day = MagicMock(return_value={})
        with self._no_day_data(actor):
            actor.on_bar(_make_synthetic_bar("2026-01-15"))
        actor.engine.end_day.assert_called_once()
        actor.engine.begin_day.assert_called_once_with("2026-01-15", posture="APEX", direction=-1)

    def test_same_date_no_redundant_begin_day(self):
        """Bars arriving on same date must not re-call begin_day."""
        import numpy as np
        import pandas as pd
        actor = self._fresh_actor()
        actor.current_date = "2026-01-15"
        zeros = np.zeros(200)
        actor._day_data = (
            pd.DataFrame({
                "vel_div": np.full(200, -0.03),
                "BTCUSDT": np.full(200, 95000.0),
                "v50_lambda_max_velocity": zeros,
                "v750_lambda_max_velocity": zeros,
                "instability_50": zeros,
            }),
            ["BTCUSDT"],
        )
        actor._bar_idx_today = 0
        actor.engine.step_bar = MagicMock(return_value={})
        actor.on_bar(_make_synthetic_bar("2026-01-15"))
        actor.on_bar(_make_synthetic_bar("2026-01-15", bar_seconds=10))
        actor.engine.begin_day.assert_not_called()

    def _assert_direction_kwarg(self, direction_cfg, expected):
        """Drive one first-bar cycle and check the direction kwarg begin_day got."""
        actor = self._fresh_actor()
        actor.current_date = None
        actor.dolphin_config["direction"] = direction_cfg
        with self._no_day_data(actor):
            actor.on_bar(_make_synthetic_bar("2026-01-15"))
        _, kwargs = actor.engine.begin_day.call_args
        self.assertEqual(kwargs["direction"], expected)

    def test_direction_short_only_maps_to_minus_one(self):
        """direction='short_only' must pass direction=-1 to begin_day."""
        self._assert_direction_kwarg("short_only", -1)

    def test_direction_long_maps_to_plus_one(self):
        """direction='long' must pass direction=+1 to begin_day."""
        self._assert_direction_kwarg("long", 1)
# ── Test: HZ-unavailable graceful degradation ────────────────────────────────────
@pytest.mark.skipif(not _HAS_ACTOR, reason="DolphinActor not available")
class TestHZUnavailableDegradation(unittest.TestCase):
    """Actor must function (APEX posture, no crash) when HZ is unreachable."""

    @staticmethod
    def _bare_actor():
        """Freshly initialised actor; hz_client is None by default."""
        actor = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor, config=_make_blue_config())
        return actor

    def test_connect_hz_returns_none_on_failure(self):
        actor = self._bare_actor()
        with patch("hazelcast.HazelcastClient", side_effect=Exception("refused")):
            self.assertIsNone(actor._connect_hz())

    def test_read_posture_returns_apex_when_hz_none(self):
        actor = self._bare_actor()
        actor.hz_client = None
        self.assertEqual(actor._read_posture(), "APEX")

    def test_write_result_no_hz_is_noop(self):
        """_write_result_to_hz with hz_client=None must silently return."""
        actor = self._bare_actor()
        actor.hz_client = None
        # Must not raise
        actor._write_result_to_hz("2026-01-15", {"pnl": 1.0})

    def test_get_latest_hz_scan_returns_none_when_no_client(self):
        actor = self._bare_actor()
        actor.hz_client = None
        self.assertIsNone(actor._get_latest_hz_scan())
# ── Test: Replay mode bar index tracking ─────────────────────────────────────────
@pytest.mark.skipif(not _HAS_ACTOR, reason="DolphinActor not available")
class TestReplayModeBarTracking(unittest.TestCase):
    """Verify _bar_idx_today increments correctly in replay mode."""

    def _make_actor_with_data(self, n_rows: int = 10):
        """Actor wired with a synthetic replay-day DataFrame of ``n_rows`` bars."""
        import pandas as pd
        import numpy as np
        instance = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(instance, config=_make_blue_config())
        instance.hz_client = None
        instance.posture = "APEX"
        instance.current_date = "2026-01-15"
        instance.engine = MagicMock()
        instance.engine._day_base_boost = 1.0
        instance.engine._day_beta = 0.0
        instance.engine._mc_gate_open = True
        instance.engine.step_bar = MagicMock(return_value={})
        instance._write_result_to_hz = MagicMock()
        frame = pd.DataFrame({
            "vel_div": np.full(n_rows, -0.03),
            "BTCUSDT": np.full(n_rows, 95000.0),
            "v50_lambda_max_velocity": np.zeros(n_rows),
            "v750_lambda_max_velocity": np.zeros(n_rows),
            "instability_50": np.zeros(n_rows),
        })
        instance._day_data = (frame, ["BTCUSDT"])
        instance._bar_idx_today = 0
        return instance

    def test_bar_idx_increments_per_bar(self):
        instance = self._make_actor_with_data(10)
        bar = _make_synthetic_bar("2026-01-15")
        # Counter advances by exactly one per processed bar.
        for expected_idx in (1, 2):
            instance.on_bar(bar)
            self.assertEqual(instance._bar_idx_today, expected_idx)

    def test_step_bar_called_with_correct_bar_idx(self):
        """step_bar's bar_idx kwarg must match the pre-increment counter."""
        instance = self._make_actor_with_data(10)
        instance.on_bar(_make_synthetic_bar("2026-01-15"))
        self.assertEqual(instance.engine.step_bar.call_args[1]["bar_idx"], 0)

    def test_past_end_of_data_returns_silently(self):
        """When _bar_idx_today >= len(df), on_bar must return without stepping."""
        instance = self._make_actor_with_data(3)
        instance._bar_idx_today = 3  # already past end
        instance.on_bar(_make_synthetic_bar("2026-01-15"))
        instance.engine.step_bar.assert_not_called()
# ── Test: on_stop cleanup ────────────────────────────────────────────────────────
@pytest.mark.skipif(not _HAS_ACTOR, reason="DolphinActor not available")
class TestOnStopCleanup(unittest.TestCase):
    """on_stop must clear state and shut down HZ client."""

    @staticmethod
    def _bare_actor():
        """Fresh actor built from the BLUE config (no on_start side effects)."""
        instance = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(instance, config=_make_blue_config())
        return instance

    def test_on_stop_clears_processed_dates(self):
        instance = self._bare_actor()
        instance._processed_dates = {"2026-01-13", "2026-01-14"}
        instance.hz_client = None
        instance.on_stop()
        self.assertEqual(len(instance._processed_dates), 0)

    def test_on_stop_resets_stale_state_events(self):
        instance = self._bare_actor()
        instance._stale_state_events = 7
        instance.hz_client = None
        instance.on_stop()
        self.assertEqual(instance._stale_state_events, 0)

    def test_on_stop_shuts_down_hz_client(self):
        instance = self._bare_actor()
        hz_stub = MagicMock()
        instance.hz_client = hz_stub
        instance.on_stop()
        hz_stub.shutdown.assert_called_once()

    def test_on_stop_no_hz_no_crash(self):
        instance = self._bare_actor()
        instance.hz_client = None
        instance.on_stop()  # must not raise
# ── Test: Stale-state snapshot ────────────────────────────────────────────────────
@pytest.mark.skipif(not _HAS_ACTOR, reason="DolphinActor not available")
class TestStaleStateGuard(unittest.TestCase):
    """_GateSnap before/after comparison must detect mid-eval state changes."""

    @staticmethod
    def _snap(**overrides):
        """Baseline snapshot (boost=1.0, beta=0.0, APEX, gate open) with overrides."""
        from nautilus_dolphin.nautilus.dolphin_actor import _GateSnap
        fields = dict(acb_boost=1.0, acb_beta=0.0, posture="APEX", mc_gate_open=True)
        fields.update(overrides)
        return _GateSnap(**fields)

    def test_gate_snap_equality_no_change(self):
        """Identical pre/post snapshots → no stale event logged."""
        self.assertEqual(self._snap(), self._snap())

    def test_gate_snap_detects_boost_change(self):
        self.assertNotEqual(self._snap(), self._snap(acb_boost=1.5))

    def test_gate_snap_detects_posture_change(self):
        self.assertNotEqual(self._snap(), self._snap(posture="STALKER"))

    def test_gate_snap_detects_gate_close(self):
        self.assertNotEqual(self._snap(), self._snap(mc_gate_open=False))

    def test_gate_snap_fields_list(self):
        """GateSnap must have exactly 4 named fields."""
        from nautilus_dolphin.nautilus.dolphin_actor import _GateSnap
        self.assertEqual(set(_GateSnap._fields),
                         {"acb_boost", "acb_beta", "posture", "mc_gate_open"})
# ── Test: Capital persistence (_save_capital / _restore_capital) ─────────────────
@pytest.mark.skipif(not _HAS_ACTOR, reason="DolphinActor not available")
class TestCapitalPersistence(unittest.TestCase):
    """DolphinActor._save_capital / _restore_capital — HZ checkpoint roundtrip."""

    def _make_actor_with_state_map(self, capital: float = 31_500.0):
        """Actor with a mock state_map backed by a real dict."""
        actor = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor, config=_make_blue_config())
        actor.engine = MagicMock()
        actor.engine.capital = capital
        # `saved` is shared by the put/get lambdas below, so tests can inspect
        # exactly what _save_capital wrote through the blocking() proxy.
        saved = {}
        mock_map = MagicMock()
        mock_map.blocking.return_value.put = lambda k, v: saved.update({k: v})
        mock_map.blocking.return_value.get = lambda k: saved.get(k)
        actor.state_map = mock_map
        return actor, saved

    def test_save_writes_checkpoint_key(self):
        actor, saved = self._make_actor_with_state_map(31_500.0)
        DolphinActor._save_capital(actor)
        self.assertIn('capital_checkpoint', saved,
                      "_save_capital must write 'capital_checkpoint' to state_map")

    def test_save_persists_correct_value(self):
        actor, saved = self._make_actor_with_state_map(99_123.45)
        DolphinActor._save_capital(actor)
        # Checkpoint payload is JSON; decode and verify the stored capital.
        data = json.loads(saved['capital_checkpoint'])
        self.assertAlmostEqual(data['capital'], 99_123.45, places=2)

    def test_save_nan_not_written(self):
        actor, saved = self._make_actor_with_state_map(float('nan'))
        DolphinActor._save_capital(actor)
        self.assertNotIn('capital_checkpoint', saved,
                         "NaN capital must not be persisted")

    def test_save_zero_not_written(self):
        actor, saved = self._make_actor_with_state_map(0.0)
        DolphinActor._save_capital(actor)
        self.assertNotIn('capital_checkpoint', saved,
                         "Zero capital must not be persisted")

    def test_save_sub_dollar_not_written(self):
        actor, saved = self._make_actor_with_state_map(0.50)
        DolphinActor._save_capital(actor)
        self.assertNotIn('capital_checkpoint', saved,
                         "Sub-$1 capital must not be persisted")

    def test_save_no_state_map_no_crash(self):
        actor, _ = self._make_actor_with_state_map(25_000.0)
        actor.state_map = None
        DolphinActor._save_capital(actor)  # must not raise

    def test_restore_recovers_saved_capital(self):
        actor, _ = self._make_actor_with_state_map(77_777.77)
        DolphinActor._save_capital(actor)
        # Fresh actor, same mock map
        actor2 = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor2, config=_make_blue_config())
        actor2.engine = MagicMock()
        actor2.engine.capital = 25_000.0
        actor2.state_map = actor.state_map
        DolphinActor._restore_capital(actor2)
        self.assertAlmostEqual(actor2.engine.capital, 77_777.77, places=2,
                               msg="Restored capital must match saved value")

    def test_restore_stale_72h_ignored(self):
        """Checkpoint older than 72 h must not be restored."""
        import time as _time
        saved = {}
        mock_map = MagicMock()
        # Timestamp 73 hours in the past — just beyond the 72 h freshness gate.
        old_ts = _time.time() - (73 * 3600)
        saved['capital_checkpoint'] = json.dumps({'capital': 55_000.0, 'ts': old_ts})
        mock_map.blocking.return_value.get = lambda k: saved.get(k)
        actor = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor, config=_make_blue_config())
        actor.engine = MagicMock()
        actor.engine.capital = 25_000.0
        actor.state_map = mock_map
        DolphinActor._restore_capital(actor)
        # Capital must remain at initial value — stale checkpoint must not be applied
        self.assertAlmostEqual(actor.engine.capital, 25_000.0, places=2,
                               msg="Stale (73h) checkpoint must not restore capital")

    def test_restore_sub_dollar_ignored(self):
        """Checkpoint with capital < $1 must not be restored."""
        import time as _time
        saved = {}
        mock_map = MagicMock()
        saved['capital_checkpoint'] = json.dumps({'capital': 0.001, 'ts': _time.time()})
        mock_map.blocking.return_value.get = lambda k: saved.get(k)
        actor = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor, config=_make_blue_config())
        actor.engine = MagicMock()
        actor.engine.capital = 25_000.0
        actor.state_map = mock_map
        DolphinActor._restore_capital(actor)
        # Sub-$1 checkpoint must not be applied
        self.assertAlmostEqual(actor.engine.capital, 25_000.0, places=2,
                               msg="Sub-$1 checkpoint must not restore capital")

    def test_restore_no_state_map_no_crash(self):
        actor = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor, config=_make_blue_config())
        actor.engine = MagicMock()
        actor.state_map = None
        DolphinActor._restore_capital(actor)  # must not raise

    def test_connect_hz_reads_host_from_config(self):
        """_connect_hz must use config hazelcast.host, not hardcoded localhost."""
        actor = DolphinActor.__new__(DolphinActor)
        cfg = _make_blue_config(
            live_mode=True,
            hazelcast={"host": "10.0.0.99:5701", "cluster": "testnet",
                       "imap_pnl": "DOLPHIN_PNL_BLUE", "state_map": "DOLPHIN_STATE_BLUE"}
        )
        DolphinActor.__init__(actor, config=cfg)
        with patch("hazelcast.HazelcastClient") as mock_hz:
            mock_client = MagicMock()
            mock_hz.return_value = mock_client
            actor._connect_hz()
            # Inspect the keyword arguments the actor passed to the HZ client.
            _, kwargs = mock_hz.call_args
            self.assertIn("10.0.0.99:5701", kwargs.get("cluster_members", []),
                          "_connect_hz must pass config host to HazelcastClient")
            self.assertEqual(kwargs.get("cluster_name"), "testnet")

    def test_connect_hz_sets_state_map(self):
        """_connect_hz must assign self.state_map from DOLPHIN_STATE_BLUE."""
        actor = DolphinActor.__new__(DolphinActor)
        cfg = _make_blue_config(live_mode=True,
                                hazelcast={"host": "localhost:5701", "cluster": "dolphin",
                                           "imap_pnl": "DOLPHIN_PNL_BLUE"})
        DolphinActor.__init__(actor, config=cfg)
        with patch("hazelcast.HazelcastClient") as mock_hz:
            mock_client = MagicMock()
            mock_hz.return_value = mock_client
            actor._connect_hz()
            self.assertIsNotNone(actor.state_map,
                                 "state_map must be set after successful _connect_hz()")
# ── Test: Stablecoin filter + NaN vel_div guard ──────────────────────────────────
@pytest.mark.skipif(not _HAS_ACTOR, reason="DolphinActor not available")
class TestLiveBarGuards(unittest.TestCase):
    """Stablecoin hard-block and NaN/spike vel_div clamp in on_bar() live path."""

    def _make_live_actor(self):
        """Live-mode actor with mocked HZ client, engine, and result writer."""
        actor = DolphinActor.__new__(DolphinActor)
        DolphinActor.__init__(actor, config=_make_blue_config(live_mode=True))
        actor.hz_client = MagicMock()
        actor.posture = "APEX"
        actor.current_date = "2026-01-15"
        actor.engine = MagicMock()
        actor.engine._day_base_boost = 1.0
        actor.engine._day_beta = 0.0
        actor.engine._mc_gate_open = True
        actor.engine.step_bar = MagicMock(return_value={})
        actor._write_result_to_hz = MagicMock()
        return actor

    def _inject_scan(self, actor, scan: dict):
        """Push scan directly into the push cache (bypasses HZ reactor thread)."""
        with actor._scan_cache_lock:
            actor._latest_scan_cache = scan
        # Reset dedup watermark so the injected scan is treated as new.
        actor.last_scan_number = -1

    def test_stablecoin_absent_from_prices_passed_to_engine(self):
        """USDCUSDT in scan must be absent from prices dict passed to step_bar."""
        actor = self._make_live_actor()
        scan = {
            'scan_number': 1, 'vel_div': -0.03,
            'w50_velocity': -0.03, 'w750_velocity': -0.005, 'instability_50': 0.1,
            'assets': ['BTCUSDT', 'ETHUSDT', 'USDCUSDT'],
            'asset_prices': [95000.0, 2500.0, 1.0001],
        }
        self._inject_scan(actor, scan)
        bar = _make_synthetic_bar("2026-01-15", bar_seconds=5)
        actor.on_bar(bar)
        call_kwargs = actor.engine.step_bar.call_args[1]
        prices_passed = call_kwargs.get('prices', {})
        self.assertNotIn('USDCUSDT', prices_passed,
                         "USDCUSDT must be hard-blocked before step_bar")
        self.assertIn('BTCUSDT', prices_passed)
        self.assertIn('ETHUSDT', prices_passed)

    def test_nan_vel_div_clamped_to_zero(self):
        """NaN vel_div in scan must be clamped to 0.0 before step_bar."""
        actor = self._make_live_actor()
        scan = {
            'scan_number': 2, 'vel_div': float('nan'),
            'w50_velocity': 0.0, 'w750_velocity': 0.0, 'instability_50': 0.0,
            'assets': ['BTCUSDT'], 'asset_prices': [95000.0],
        }
        self._inject_scan(actor, scan)
        bar = _make_synthetic_bar("2026-01-15", bar_seconds=5)
        actor.on_bar(bar)
        call_kwargs = actor.engine.step_bar.call_args[1]
        self.assertEqual(call_kwargs['vel_div'], 0.0,
                         "NaN vel_div must be clamped to 0.0")

    def test_spike_vel_div_clamped_to_zero(self):
        """|vel_div| > 0.20 (NG7 restart spike) must be clamped to 0.0."""
        actor = self._make_live_actor()
        # Both extreme restart spikes and values just past the ±0.20 edge.
        for spike in [-12.76, +12.98, -0.21, +0.21]:
            scan = {
                'scan_number': 10 + int(abs(spike)), 'vel_div': spike,
                'w50_velocity': spike, 'w750_velocity': 0.0, 'instability_50': 0.0,
                'assets': ['BTCUSDT'], 'asset_prices': [95000.0],
            }
            self._inject_scan(actor, scan)
            actor.last_scan_number = -1
            bar = _make_synthetic_bar("2026-01-15", bar_seconds=5)
            actor.on_bar(bar)
            call_kwargs = actor.engine.step_bar.call_args[1]
            self.assertEqual(call_kwargs['vel_div'], 0.0,
                             f"vel_div spike={spike} must be clamped to 0.0")

    def test_duplicate_scan_number_skipped(self):
        """Same scan_number arriving twice must call step_bar only once."""
        actor = self._make_live_actor()
        scan = {
            'scan_number': 99, 'vel_div': -0.03,
            'w50_velocity': -0.03, 'w750_velocity': -0.005, 'instability_50': 0.0,
            'assets': ['BTCUSDT'], 'asset_prices': [95000.0],
        }
        self._inject_scan(actor, scan)
        bar = _make_synthetic_bar("2026-01-15", bar_seconds=5)
        actor.on_bar(bar)
        actor.on_bar(bar)  # second call with same scan_number — must be deduped
        self.assertEqual(actor.engine.step_bar.call_count, 1,
                         "Duplicate scan_number must be deduplicated")
# ── Standalone runner ─────────────────────────────────────────────────────────────
if __name__ == "__main__":
    # Allow running this test module directly: verbose pytest, short tracebacks.
    pytest.main([__file__, "-v", "--tb=short"])

View File

@@ -0,0 +1,218 @@
import json
import threading
import time
import random
import string
from unittest.mock import Mock, patch
import pytest
from nautilus_dolphin.nautilus.dolphin_actor import DolphinActor
class MockEvent:
    """Minimal stand-in for a Hazelcast entry event carrying a payload in .value."""

    def __init__(self, value):
        self.value = value
class MockHZMap:
    """Fake HZ map: records listener registrations, acts as its own blocking proxy."""

    def __init__(self):
        self._listeners = []

    def add_entry_listener(self, include_value, key, added_func, updated_func):
        # Store the registration so tests can inspect what was subscribed.
        self._listeners.append((key, added_func, updated_func))

    def blocking(self):
        # Hazelcast's blocking proxy is modeled as the map itself.
        return self
class MockHZClient:
    """Fake Hazelcast client: hands out fresh MockHZMap instances, no-op shutdown."""

    def get_map(self, name):
        return MockHZMap()

    def shutdown(self):
        # Nothing to release in the fake.
        pass
@pytest.fixture
def dolphin_config():
    """Minimal live-mode config dict sufficient to construct a DolphinActor."""
    return {
        'live_mode': True,
        'native_mode': False,
        'venue': 'BINANCE',
        'engine': {'boost_mode': 'baseline'},
        'hazelcast': {'host': 'localhost:5701'}
    }
@pytest.fixture
def actor(dolphin_config):
    """DolphinActor wired with a fake HZ client, mocked engine, fixed APEX posture."""
    actor = DolphinActor(dolphin_config)
    actor.hz_client = MockHZClient()
    actor.engine = Mock()
    actor._read_posture = Mock(return_value='APEX')
    actor.posture = 'APEX'
    actor.current_date = '2026-04-01'
    # Setup some basic initial state
    return actor
def test_signal_path_basic(actor):
    """Test standard HZ push update flows correctly into cache and sets pending."""
    payload = {"version": "NG7", "scan_number": 1, "vel_div": -0.05}
    # Pre-conditions: nothing cached, nothing pending.
    assert actor._latest_scan_cache is None
    assert actor._scan_pending is False
    actor._on_scan_event(MockEvent(json.dumps(payload)))
    cached = actor._latest_scan_cache
    assert cached is not None
    assert cached["scan_number"] == 1
    assert actor._scan_pending is True
def test_timer_path_drains_correctly(actor):
    """Test the timer safely drains the pending flag and processes."""
    payload = {"version": "NG7", "scan_number": 1234, "vel_div": -0.06}
    actor._on_scan_event(MockEvent(json.dumps(payload)))  # seed cache
    actor._on_scan_timer(Mock())
    # Pending flag cleared and the engine stepped exactly through the timer.
    assert actor._scan_pending is False
    assert actor.engine.step_bar.called
def test_timer_no_op_when_not_pending(actor):
    """Timer should exit immediately if pending is False."""
    actor._scan_pending = False
    actor._on_scan_timer(Mock())
    assert actor.engine.step_bar.called is False
def test_race_condition_concurrent_hz_and_timer(actor):
    """Fuzz concurrency: Multiple HZ threads writing while timer is draining.

    FIX: the original incremented the shared ``scans_written`` counter from two
    producer threads via ``nonlocal`` with no synchronization.  ``x += 1`` is a
    non-atomic read-modify-write, so increments could be lost and both threads
    could emit the same scan_number.  A lock now guards the counter so each
    pushed scan gets a unique, monotonically increasing number.
    """
    scans_written = 0
    scans_processed = 0
    counter_lock = threading.Lock()
    stop_event = threading.Event()

    def simulate_hz_push():
        nonlocal scans_written
        while not stop_event.is_set():
            # Reserve a unique sequence number under the lock.
            with counter_lock:
                seq = scans_written
                scans_written += 1
            scan = {"version": "NG7", "scan_number": seq, "vel_div": -0.05}
            actor._on_scan_event(MockEvent(json.dumps(scan)))
            time.sleep(0.001)

    def simulate_nautilus_timer():
        nonlocal scans_processed
        while not stop_event.is_set():
            # Fresh mock per drain so `.called` reflects only this iteration.
            actor.engine.step_bar = Mock(return_value={})
            actor._on_scan_timer(MockEvent(None))
            if actor.engine.step_bar.called:
                scans_processed += 1
            time.sleep(0.005)

    threads = [
        threading.Thread(target=simulate_hz_push),
        threading.Thread(target=simulate_hz_push),
        threading.Thread(target=simulate_nautilus_timer),
    ]
    for t in threads:
        t.start()
    time.sleep(1.0)
    stop_event.set()
    for t in threads:
        t.join()
    # Even with concurrent spam, the actor must survive without crashing,
    # locks shouldn't deadlock, and we should process many scans.
    assert scans_processed > 0
    assert actor._scan_pending in (True, False)  # Can be either depending on stopping exactly when
def test_edge_case_malformed_json(actor):
    """Test actor survives malformed JSON pushed from HZ."""
    actor._on_scan_event(MockEvent("{bad_json"))
    # The parse error must be swallowed and must not mark a scan pending.
    assert actor._scan_pending is False
def test_fuzz_random_scan_payloads(actor):
    """Fuzz the step_bar processor with totally random data payloads.

    FIX: the original drew from the global, unseeded ``random`` module, making
    failures non-reproducible across runs.  A locally seeded ``random.Random``
    keeps the fuzz deterministic while preserving the same payload coverage.
    """
    rng = random.Random(0xD01F)
    for _ in range(100):
        random_dict = {
            "".join(rng.choices(string.ascii_letters, k=5)): rng.random()
            for _ in range(int(rng.random() * 20))
        }
        random_dict["version"] = "NG7"
        random_dict["scan_number"] = rng.randint(100, 10000)
        # Inject some valid keys but wrong types occasionally
        if rng.random() > 0.5:
            random_dict["vel_div"] = "not_a_float"
        event = MockEvent(json.dumps(random_dict))
        actor._on_scan_event(event)
    # In this fuzz, we just want to ensure it doesn't raise unhandled exceptions
    # We Mock step_bar just so we don't test the strategy engine internals here
    actor.engine.step_bar = Mock(side_effect=Exception("Fake Engine Error"))
    actor._on_scan_timer(Mock())
    # Should catch "Fake Engine Error" or gracefully handle the bad types without crashing the actor loop
    assert actor._scan_pending is False
def test_stale_state_detection(actor):
    """Verify _GateSnap stale state logic detects when properties shift mid-step."""
    scan = {"version": "NG7", "scan_number": 99}
    actor._on_scan_event(MockEvent(json.dumps(scan)))
    # Baseline gate state captured by the pre-step snapshot.
    actor.posture = 'APEX'
    actor.engine._day_base_boost = 1.0
    actor.engine._day_beta = 0.0
    actor.engine._mc_gate_open = True
    # step_bar modifies the posture under our feet
    def fake_step_bar(*args, **kwargs):
        actor._read_posture = Mock(return_value='DEFENSIVE')
        return {}
    actor.engine.step_bar = Mock(side_effect=fake_step_bar)
    actor._write_result_to_hz = Mock()
    actor._on_scan_timer(Mock())
    # Exactly one stale event must be counted for the mid-step posture flip.
    assert actor._stale_state_events == 1
    # Check that it appended stale_state True to result before writing
    call_args = actor._write_result_to_hz.call_args
    assert call_args[0][1].get("stale_state") is True
def test_acb_update_drain(actor):
    """Ensure ACB pending overrides are drained properly by the timer."""
    payload = {"version": "NG7", "scan_number": 5}
    actor._on_scan_event(MockEvent(json.dumps(payload)))
    actor._pending_acb = {"boost": 1.5, "beta": 0.2}
    actor.engine.step_bar = Mock(return_value={})
    actor._on_scan_timer(Mock())
    # Pending override consumed exactly once and forwarded to the engine.
    assert actor._pending_acb is None
    actor.engine.update_acb_boost.assert_called_with(1.5, 0.2)
def test_date_boundary_rollover(actor):
    """Verify timer triggers end_day / begin_day when timestamps roll over."""
    day_one = {"version": "NG7", "scan_number": 1, "timestamp_ns": 1704067200000000000}  # 2024-01-01
    actor._on_scan_event(MockEvent(json.dumps(day_one)))
    actor._on_scan_timer(Mock())
    assert actor.current_date == '2024-01-01'
    actor.engine.end_day.reset_mock()
    actor.engine.begin_day.reset_mock()
    day_two = {"version": "NG7", "scan_number": 2, "timestamp_ns": 1704153600000000000}  # 2024-01-02
    actor._on_scan_event(MockEvent(json.dumps(day_two)))
    actor._on_scan_timer(Mock())
    # Exactly one end_day/begin_day pair must fire on the rollover.
    assert actor.current_date == '2024-01-02'
    assert actor.engine.end_day.call_count == 1
    assert actor.engine.begin_day.call_count == 1

View File

@@ -0,0 +1,540 @@
"""
GREEN-BLUE Algorithmic Parity Tests
Verifies that DolphinActor (GREEN) has full algorithmic parity with
the BLUE production system (nautilus_event_trader.py).
Covers:
1. MC_BASE_CFG parameter parity
2. ALGO_VERSION constant match
3. vel_div formula (v50 - v750)
4. vol_ok computation (rolling BTC dvol gate)
5. _BUCKET_SL_PCT parity
6. Engine kwargs gold-spec values
7. Hibernate protection logic
8. NG7 normalization parity
"""
import math
import numpy as np
import pytest
from pathlib import Path
from collections import deque
# ── Import GREEN (DolphinActor) ──────────────────────────────────────────────
import sys
sys.path.insert(0, str(Path(__file__).parent.parent))
sys.path.insert(0, str(Path(__file__).parent.parent / 'nautilus_dolphin'))
from nautilus_dolphin.nautilus.dolphin_actor import (
DolphinActor,
_MC_BASE_CFG,
ALGO_VERSION,
BTC_VOL_WINDOW,
VOL_P60_THRESHOLD,
_BUCKET_SL_PCT,
_GateSnap,
)
# ── Import BLUE constants for comparison ─────────────────────────────────────
_BLUE_ROOT = Path('/mnt/dolphinng5_predict/prod')
sys.path.insert(0, str(_BLUE_ROOT))
# ═══════════════════════════════════════════════════════════════════════════════
# 1. MC_BASE_CFG PARITY
# ═══════════════════════════════════════════════════════════════════════════════
class TestMCBaseCfgParity:
    """GREEN's _MC_BASE_CFG must match BLUE's gold-spec MC config exactly."""

    # BLUE gold values (from nautilus_event_trader.py MC_BASE_CFG)
    # NOTE: these are intentionally duplicated literals — the test must not
    # import them from GREEN, or a drift in GREEN would go undetected.
    BLUE_GOLD = {
        'max_leverage': 8.00,
        'max_hold_bars': 250,
        'min_irp_alignment': 0.0,
        'vel_div_threshold': -0.020,
        'vel_div_extreme': -0.050,
        'min_leverage': 0.50,
        'leverage_convexity': 3.00,
        'fraction': 0.20,
        'fixed_tp_pct': 0.0095,
        'stop_pct': 1.00,
        'use_sp_fees': True,
        'use_sp_slippage': True,
        'sp_maker_entry_rate': 0.62,
        'sp_maker_exit_rate': 0.50,
        'use_ob_edge': True,
        'ob_edge_bps': 5.00,
        'ob_confirm_rate': 0.40,
        'lookback': 100,
        'use_direction_confirm': True,
        'dc_lookback_bars': 7,
        'dc_min_magnitude_bps': 0.75,
        'dc_skip_contradicts': True,
        'dc_leverage_boost': 1.00,
        'dc_leverage_reduce': 0.50,
        'use_asset_selection': True,
        'use_alpha_layers': True,
        'use_dynamic_leverage': True,
        'acb_beta_high': 0.80,
        'acb_beta_low': 0.20,
        'acb_w750_threshold_pct': 60,
    }

    # One parametrized case per gold key: presence and exact-value equality.
    @pytest.mark.parametrize("key,expected", list(BLUE_GOLD.items()))
    def test_mc_cfg_key_matches_blue(self, key, expected):
        assert key in _MC_BASE_CFG, f"Key '{key}' missing from _MC_BASE_CFG"
        assert _MC_BASE_CFG[key] == expected, (
            f"MC_BASE_CFG['{key}']: GREEN={_MC_BASE_CFG[key]} != BLUE={expected}"
        )

    def test_max_leverage_is_8x(self):
        assert _MC_BASE_CFG['max_leverage'] == 8.0

    def test_max_hold_bars_is_250(self):
        assert _MC_BASE_CFG['max_hold_bars'] == 250

    def test_min_irp_alignment_is_zero(self):
        assert _MC_BASE_CFG['min_irp_alignment'] == 0.0
# ═══════════════════════════════════════════════════════════════════════════════
# 2. ALGO_VERSION PARITY
# ═══════════════════════════════════════════════════════════════════════════════
class TestAlgoVersion:
    """ALGO_VERSION must stay pinned to the v2 gold-fix identifier."""

    def test_algo_version_is_v2_gold_fix(self):
        expected = "v2_gold_fix_v50-v750"
        assert ALGO_VERSION == expected

    def test_algo_version_is_string(self):
        assert isinstance(ALGO_VERSION, str)

    def test_algo_version_not_v1_shakedown(self):
        # Neither v1 shakedown tag may appear anywhere in the version string.
        for stale_tag in ("v1", "v150"):
            assert stale_tag not in ALGO_VERSION
# ═══════════════════════════════════════════════════════════════════════════════
# 3. VEL_DIV FORMULA PARITY
# ═══════════════════════════════════════════════════════════════════════════════
class TestVelDivFormula:
    """vel_div must always be v50 - v750, never v50 - v150."""

    def test_v50_minus_v750_basic(self):
        v50, v750 = -0.03, -0.01
        assert (v50 - v750) == pytest.approx(-0.02)

    def test_v50_minus_v750_signal_trigger(self):
        vel_div = -0.035 - 0.005  # v50=-0.035, v750=0.005
        assert vel_div < -0.02  # should trigger entry

    def test_not_v50_minus_v150(self):
        """CRITICAL: v50-v150 was the v1 shakedown bug."""
        v50, v150, v750 = -0.03, -0.02, -0.01
        correct = v50 - v750  # -0.02
        buggy = v50 - v150  # -0.01
        assert correct != buggy, "v50-v750 must differ from v50-v150"

    def test_ng7_normalize_uses_v750(self):
        """Verify _normalize_ng7_scan computes v50-v750."""
        # Minimal NG7 payload with all three window velocities present, so a
        # regression to the 150-window would be observable.
        scan = {
            'version': 'NG7',
            'result': {
                'multi_window_results': {
                    '50': {'tracking_data': {'lambda_max_velocity': -0.04}},
                    '150': {'tracking_data': {'lambda_max_velocity': -0.02}},
                    '750': {'tracking_data': {'lambda_max_velocity': -0.01}},
                },
                'pricing_data': {'current_prices': {'BTCUSDT': 70000.0}},
                'regime_prediction': {'instability_score': 0.5},
            },
        }
        result = DolphinActor._normalize_ng7_scan(scan)
        expected = -0.04 - (-0.01)  # v50 - v750 = -0.03
        assert abs(result['vel_div'] - expected) < 1e-10, (
            f"vel_div={result['vel_div']} but expected v50-v750={expected}"
        )

    def test_ng7_normalize_not_v150(self):
        scan = {
            'version': 'NG7',
            'result': {
                'multi_window_results': {
                    '50': {'tracking_data': {'lambda_max_velocity': -0.04}},
                    '150': {'tracking_data': {'lambda_max_velocity': -0.02}},
                    '750': {'tracking_data': {'lambda_max_velocity': -0.01}},
                },
                'pricing_data': {'current_prices': {'BTCUSDT': 70000.0}},
                'regime_prediction': {'instability_score': 0.5},
            },
        }
        result = DolphinActor._normalize_ng7_scan(scan)
        # The buggy v1 value would be -0.02; the normalizer must not produce it.
        buggy = -0.04 - (-0.02)  # v50 - v150 = -0.02
        assert result['vel_div'] != buggy, "vel_div must NOT be v50-v150"
# ═══════════════════════════════════════════════════════════════════════════════
# 4. VOL_OK COMPUTATION PARITY
# ═══════════════════════════════════════════════════════════════════════════════
class TestVolOkParity:
    """GREEN must use the same BTC rolling dvol gate as BLUE."""

    def _make_actor(self):
        """Minimal actor: only the fields _compute_vol_ok needs."""
        config = {'strategy_name': 'test', 'engine': {}}
        return DolphinActor(config)

    def test_vol_p60_threshold_matches_blue(self):
        assert VOL_P60_THRESHOLD == 0.00009868

    def test_btc_vol_window_matches_blue(self):
        assert BTC_VOL_WINDOW == 50

    def test_vol_ok_returns_true_when_insufficient_data(self):
        # Fewer than BTC_VOL_WINDOW samples → gate must fail open (True).
        actor = self._make_actor()
        scan = {'assets': ['BTCUSDT'], 'asset_prices': [70000.0]}
        assert actor._compute_vol_ok(scan) is True

    def test_vol_ok_returns_true_when_no_btc(self):
        actor = self._make_actor()
        scan = {'assets': ['ETHUSDT'], 'asset_prices': [3500.0]}
        assert actor._compute_vol_ok(scan) is True

    def test_vol_ok_returns_true_high_vol(self):
        """High volatility regime should pass vol_ok."""
        actor = self._make_actor()
        # Simulate volatile BTC prices
        base = 70000.0
        for i in range(52):
            noise = (i % 3 - 1) * 500.0  # large swings
            actor.btc_prices.append(base + noise)
        scan = {'assets': ['BTCUSDT'], 'asset_prices': [70000.0 + 500.0]}
        assert actor._compute_vol_ok(scan) is True

    def test_vol_ok_returns_false_low_vol(self):
        """Very low volatility should fail vol_ok."""
        actor = self._make_actor()
        base = 70000.0
        for i in range(52):
            actor.btc_prices.append(base + i * 0.001)  # near-zero variance
        scan = {'assets': ['BTCUSDT'], 'asset_prices': [70000.052]}
        assert actor._compute_vol_ok(scan) is False

    def test_vol_ok_empty_scan(self):
        actor = self._make_actor()
        assert actor._compute_vol_ok({}) is True

    def test_vol_ok_matches_blue_formula(self):
        """Verify the dvol computation formula matches BLUE exactly."""
        actor = self._make_actor()
        # Use volatile prices so dvol > threshold
        prices = [70000.0 + ((-1)**i) * (i * 50.0) for i in range(52)]
        for p in prices:
            actor.btc_prices.append(p)
        scan = {'assets': ['BTCUSDT'], 'asset_prices': [70000.0]}
        # Manually compute like BLUE: std of one-bar simple returns.
        arr = np.array(list(actor.btc_prices))
        expected_dvol = float(np.std(np.diff(arr) / arr[:-1]))
        result = actor._compute_vol_ok(scan)
        assert result == (expected_dvol > VOL_P60_THRESHOLD), (
            f"dvol={expected_dvol:.8f} threshold={VOL_P60_THRESHOLD} result={result}"
        )
# ═══════════════════════════════════════════════════════════════════════════════
# 5. BUCKET SL PCT PARITY
# ═══════════════════════════════════════════════════════════════════════════════
class TestBucketSlPctParity:
    """GREEN must have the same per-bucket SL percentages as BLUE."""

    # Duplicated BLUE literals on purpose — importing them would hide drift.
    BLUE_BUCKET_SL = {
        0: 0.015, 1: 0.012, 2: 0.015, 3: 0.025,
        4: 0.008, 5: 0.018, 6: 0.030, 'default': 0.015,
    }

    @pytest.mark.parametrize("bucket_id,expected", list(BLUE_BUCKET_SL.items()))
    def test_bucket_sl_matches_blue(self, bucket_id, expected):
        assert bucket_id in _BUCKET_SL_PCT, f"Bucket {bucket_id} missing"
        assert _BUCKET_SL_PCT[bucket_id] == expected

    def test_all_7_buckets_present(self):
        for bucket in range(7):
            assert bucket in _BUCKET_SL_PCT, f"Bucket {bucket} missing"

    def test_default_present(self):
        assert 'default' in _BUCKET_SL_PCT
# ═══════════════════════════════════════════════════════════════════════════════
# 6. ENGINE KWARGS GOLD-SPEC PARITY (via green.yml)
# ═══════════════════════════════════════════════════════════════════════════════
class TestGreenYmlParity:
    """Verify green.yml engine config matches BLUE's ENGINE_KWARGS."""

    @pytest.fixture
    def green_config(self):
        """Parsed green.yml, or skip the test when the file is absent."""
        import yaml
        cfg_path = Path('/mnt/dolphinng5_predict/prod/configs/green.yml')
        if not cfg_path.exists():
            pytest.skip("green.yml not found")
        with open(cfg_path) as f:
            return yaml.safe_load(f)

    # BLUE gold engine kwargs, duplicated as literals so drift is detectable.
    BLUE_GOLD_ENGINE = {
        'vel_div_threshold': -0.02,
        'vel_div_extreme': -0.05,
        'min_leverage': 0.5,
        'max_leverage': 8.0,
        'leverage_convexity': 3.0,
        'fraction': 0.20,
        'fixed_tp_pct': 0.0095,
        'stop_pct': 1.0,
        'max_hold_bars': 250,
        'use_direction_confirm': True,
        'dc_lookback_bars': 7,
        'dc_min_magnitude_bps': 0.75,
        'dc_skip_contradicts': True,
        'dc_leverage_boost': 1.0,
        'dc_leverage_reduce': 0.5,
        'use_asset_selection': True,
        'min_irp_alignment': 0.0,
        'use_sp_fees': True,
        'use_sp_slippage': True,
        'sp_maker_entry_rate': 0.62,
        'sp_maker_exit_rate': 0.50,
        'use_ob_edge': True,
        'ob_edge_bps': 5.0,
        'ob_confirm_rate': 0.40,
        'lookback': 100,
        'use_alpha_layers': True,
        'use_dynamic_leverage': True,
        'seed': 42,
    }

    @pytest.mark.parametrize("key,expected", list(BLUE_GOLD_ENGINE.items()))
    def test_engine_param_matches_blue(self, green_config, key, expected):
        eng = green_config.get('engine', {})
        assert eng.get(key) == expected, (
            f"green.yml engine['{key}']: got={eng.get(key)} expected={expected}"
        )

    def test_direction_is_short_only(self, green_config):
        assert green_config.get('direction') == 'short_only'

    def test_strategy_name_is_green(self, green_config):
        assert green_config.get('strategy_name') == 'green'

    def test_hz_state_map_is_green(self, green_config):
        hz = green_config.get('hazelcast', {})
        assert 'GREEN' in hz.get('state_map', ''), "State map must be GREEN-specific"

    def test_hz_pnl_map_is_green(self, green_config):
        hz = green_config.get('hazelcast', {})
        assert 'GREEN' in hz.get('imap_pnl', ''), "PNL map must be GREEN-specific"
# ═══════════════════════════════════════════════════════════════════════════════
# 7. HIBERNATE PROTECTION PARITY
# ═══════════════════════════════════════════════════════════════════════════════
class TestHibernateProtectionParity:
    """Verify GREEN has hibernate protection matching BLUE's behavior."""
    def _make_actor(self):
        # Minimal config is sufficient for these attribute-presence checks;
        # no engine or Hazelcast wiring is exercised here.
        config = {'strategy_name': 'test', 'engine': {}}
        return DolphinActor(config)
    def test_actor_has_hibernate_protect_method(self):
        actor = self._make_actor()
        assert hasattr(actor, '_hibernate_protect_position')
    def test_actor_has_hibernate_protect_active_field(self):
        actor = self._make_actor()
        assert hasattr(actor, '_hibernate_protect_active')
        # No protection armed on a freshly constructed actor.
        assert actor._hibernate_protect_active is None
    def test_actor_has_bucket_assignments_field(self):
        actor = self._make_actor()
        assert hasattr(actor, '_bucket_assignments')
        assert isinstance(actor._bucket_assignments, dict)
    def test_actor_has_compute_vol_ok(self):
        actor = self._make_actor()
        assert hasattr(actor, '_compute_vol_ok')
        assert callable(actor._compute_vol_ok)
    def test_actor_has_btc_prices_deque(self):
        actor = self._make_actor()
        assert hasattr(actor, 'btc_prices')
        assert isinstance(actor.btc_prices, deque)
    def test_hibernate_noop_when_no_position(self):
        actor = self._make_actor()
        actor.engine = None
        # Should not raise
        actor._hibernate_protect_position()
    def test_hibernate_label_map(self):
        """Verify the hibernate exit reason re-labeling matches BLUE."""
        # NOTE(review): this compares a locally-built literal against an
        # identical literal, so it can never fail regardless of the actor's
        # actual re-labeling logic. It documents the expected mapping only;
        # consider asserting against the mapping the actor really uses.
        _map = {
            'FIXED_TP': 'HIBERNATE_TP',
            'STOP_LOSS': 'HIBERNATE_SL',
            'MAX_HOLD': 'HIBERNATE_MAXHOLD',
        }
        assert _map == {
            'FIXED_TP': 'HIBERNATE_TP',
            'STOP_LOSS': 'HIBERNATE_SL',
            'MAX_HOLD': 'HIBERNATE_MAXHOLD',
        }
# ═══════════════════════════════════════════════════════════════════════════════
# 8. E2E PARITY: REPLAY DAY MATCHES BLUE
# ═══════════════════════════════════════════════════════════════════════════════
class TestE2EReplayParity:
    """Run a known-good day through both engines and compare capital trajectories."""
    KNOWN_GOOD_DATE = '2026-02-25'

    # Parquet columns that are scan metadata, not tradable assets.
    _META_COLS = {
        'timestamp', 'scan_number', 'v50_lambda_max_velocity',
        'v150_lambda_max_velocity', 'v300_lambda_max_velocity',
        'v750_lambda_max_velocity', 'vel_div', 'instability_50', 'instability_150',
    }

    @pytest.fixture
    def parquet_path(self):
        """Path to the cached klines for the known-good date; skip if absent."""
        p = Path(f'/mnt/dolphinng5_predict/vbt_cache_klines/{self.KNOWN_GOOD_DATE}.parquet')
        if not p.exists():
            pytest.skip(f"Parquet for {self.KNOWN_GOOD_DATE} not found")
        return p

    @staticmethod
    def _make_gold_engine():
        """Build a D-LIQ engine with the BLUE gold-spec kwargs.

        Single source of truth for the engine parameters so the two replay
        tests below cannot drift apart (previously the full kwargs block was
        duplicated verbatim in each test).
        """
        from nautilus_dolphin.nautilus.proxy_boost_engine import create_d_liq_engine
        return create_d_liq_engine(
            initial_capital=25000.0,
            vel_div_threshold=-0.02, vel_div_extreme=-0.05,
            min_leverage=0.5, max_leverage=8.0,
            leverage_convexity=3.0, fraction=0.20,
            fixed_tp_pct=0.0095, stop_pct=1.0, max_hold_bars=250,
            use_direction_confirm=True, dc_lookback_bars=7,
            dc_min_magnitude_bps=0.75, dc_skip_contradicts=True,
            dc_leverage_boost=1.0, dc_leverage_reduce=0.5,
            use_asset_selection=True, min_irp_alignment=0.0,
            use_sp_fees=True, use_sp_slippage=True,
            sp_maker_entry_rate=0.62, sp_maker_exit_rate=0.50,
            use_ob_edge=True, ob_edge_bps=5.0, ob_confirm_rate=0.40,
            lookback=100, use_alpha_layers=True,
            use_dynamic_leverage=True, seed=42,
        )

    @classmethod
    def _load_day(cls, parquet_path):
        """Load the day's parquet and return (df, asset_columns)."""
        import pandas as pd
        df = pd.read_parquet(parquet_path)
        asset_columns = [c for c in df.columns if c not in cls._META_COLS]
        return df, asset_columns

    def test_replay_day_produces_finite_capital(self, parquet_path):
        """Run a full day replay and verify capital is finite and positive."""
        df, asset_columns = self._load_day(parquet_path)
        engine = self._make_gold_engine()
        # Vol-ok mask: rolling 50-bar stdev of BTC returns compared against
        # the P60 threshold; bars without enough finite returns stay False.
        vol_ok_mask = np.zeros(len(df), dtype=bool)
        bp = df['BTCUSDT'].values if 'BTCUSDT' in df.columns else None
        if bp is not None:
            dv = np.full(len(bp), np.nan)
            for j in range(50, len(bp)):
                seg = bp[max(0, j - 50):j]
                with np.errstate(invalid='ignore', divide='ignore'):
                    rets = np.diff(seg) / seg[:-1]
                fin = rets[np.isfinite(rets)]
                if len(fin) >= 5:
                    dv[j] = float(np.std(fin))
            vol_ok_mask = np.where(np.isfinite(dv), dv > VOL_P60_THRESHOLD, False)
        engine.begin_day(self.KNOWN_GOOD_DATE, posture='APEX', direction=-1)
        engine.process_day(
            self.KNOWN_GOOD_DATE, df, asset_columns,
            vol_regime_ok=vol_ok_mask, direction=-1, posture='APEX',
        )
        capital = engine.capital
        assert math.isfinite(capital), f"Capital not finite: {capital}"
        assert capital > 0, f"Capital not positive: {capital}"

    def test_replay_produces_trades(self, parquet_path):
        """Verify the engine actually trades on the known-good date."""
        df, asset_columns = self._load_day(parquet_path)
        engine = self._make_gold_engine()
        # All-True mask: the vol regime never gates entries in this test.
        vol_ok_mask = np.ones(len(df), dtype=bool)
        result = engine.process_day(
            self.KNOWN_GOOD_DATE, df, asset_columns,
            vol_regime_ok=vol_ok_mask, direction=-1, posture='APEX',
        )
        trades = result.get('trades', 0)
        assert trades > 0, f"Expected trades on {self.KNOWN_GOOD_DATE}, got {trades}"
# ═══════════════════════════════════════════════════════════════════════════════
# 9. CH OUTPUT SEPARATION
# ═══════════════════════════════════════════════════════════════════════════════
class TestOutputSeparation:
    """GREEN must write to GREEN-specific CH/HZ channels, never BLUE."""

    def _make_actor(self):
        """Build an actor wired to the GREEN-specific Hazelcast maps."""
        hz_maps = {
            'imap_pnl': 'DOLPHIN_PNL_GREEN',
            'state_map': 'DOLPHIN_STATE_GREEN',
        }
        return DolphinActor({
            'strategy_name': 'green',
            'engine': {},
            'hazelcast': hz_maps,
        })

    def test_strategy_name_is_green(self):
        """The configured strategy name must come through as 'green'."""
        actor = self._make_actor()
        assert actor._strategy_name == 'green'

    def test_default_strategy_name_is_green(self):
        """An empty config must still default the strategy name to 'green'."""
        bare_actor = DolphinActor({})
        assert bare_actor._strategy_name == 'green'

    def test_hz_pnl_map_from_config(self):
        """The PNL IMap name must be the GREEN-specific channel."""
        actor = self._make_actor()
        hz = actor.dolphin_config.get('hazelcast', {})
        assert 'GREEN' in hz.get('imap_pnl', '')

    def test_hz_state_map_from_config(self):
        """The state IMap name must be the GREEN-specific channel."""
        actor = self._make_actor()
        hz = actor.dolphin_config.get('hazelcast', {})
        assert 'GREEN' in hz.get('state_map', '')

View File

@@ -0,0 +1,212 @@
import sys
import os
import unittest
import json
import time
from pathlib import Path
# Add correctly mapped paths for the ND system
ROOT_DIR = Path(__file__).parent.parent.parent
sys.path.insert(0, str(ROOT_DIR / "nautilus_dolphin"))
sys.path.insert(0, str(ROOT_DIR))
import numpy as np
import logging
from unittest.mock import MagicMock, patch
from collections import deque
from datetime import datetime, timezone
from nautilus_dolphin.nautilus.ob_features import (
OBFeatureEngine, OBPlacementFeatures, OBSignalFeatures, OBMacroFeatures,
NEUTRAL_PLACEMENT, NEUTRAL_SIGNAL, NEUTRAL_MACRO
)
from nautilus_dolphin.nautilus.ob_provider import OBSnapshot
from nautilus_dolphin.nautilus.hz_ob_provider import HZOBProvider
class TestHZOBProviderLive(unittest.TestCase):
    """Live-mode behavior of OBFeatureEngine driven by a mocked HZOBProvider."""
    def setUp(self):
        # spec= restricts the mock to real HZOBProvider attributes so typos
        # in provider calls fail loudly instead of returning child mocks.
        self.mock_provider = MagicMock(spec=HZOBProvider)
        self.engine = OBFeatureEngine(self.mock_provider)
    def test_step_live_fetches_snapshots(self):
        """Test that step_live calls provider.get_snapshot for all assets."""
        assets = ["BTCUSDT", "ETHUSDT"]
        self.mock_provider.get_snapshot.return_value = None
        self.engine.step_live(assets, bar_idx=100)
        self.assertEqual(self.mock_provider.get_snapshot.call_count, 2)
        self.assertTrue(self.engine._live_mode)
        self.assertEqual(self.engine._live_bar_idx, 100)
    def test_step_live_populates_placement_cache(self):
        """Test that placement features are correctly computed and cached in live mode."""
        asset = "BTCUSDT"
        snap = OBSnapshot(
            timestamp=time.time(),
            asset=asset,
            bid_notional=np.array([1000.0, 2000.0, 3000.0, 4000.0, 5000.0]),
            ask_notional=np.array([1100.0, 2100.0, 3100.0, 4100.0, 5100.0]),
            bid_depth=np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
            ask_depth=np.array([1.1, 2.1, 3.1, 4.1, 5.1])
        )
        self.mock_provider.get_snapshot.return_value = snap
        self.engine.step_live([asset], bar_idx=5)
        placement = self.engine.get_placement(asset, 5)
        # First notional level on each side: bid 1000 + ask 1100.
        self.assertAlmostEqual(placement.depth_1pct_usd, 2100.0)  # 1000 + 1100
        self.assertGreater(placement.fill_probability, 0.5)
    def test_step_live_populates_signal_cache(self):
        """Test that signal features (imbalance, persistence) are computed in live mode."""
        asset = "BTCUSDT"
        # Snapshot with heavy bid exposure (imbalance > 0)
        snap = OBSnapshot(
            timestamp=time.time(),
            asset=asset,
            bid_notional=np.array([5000.0, 0, 0, 0, 0]),
            ask_notional=np.array([1000.0, 0, 0, 0, 0]),
            bid_depth=np.ones(5), ask_depth=np.ones(5)
        )
        self.mock_provider.get_snapshot.return_value = snap
        # Step twice to check histories
        self.engine.step_live([asset], bar_idx=10)
        self.engine.step_live([asset], bar_idx=11)
        signal = self.engine.get_signal(asset, 11)
        # Imbalance = (bid - ask) / (bid + ask) on total notionals.
        self.assertAlmostEqual(signal.imbalance, (5000-1000)/(5000+1000))
        self.assertEqual(signal.imbalance_persistence, 1.0)  # both positive
    def test_step_live_market_features(self):
        """Test cross-asset agreement and cascade signal."""
        assets = ["BTCUSDT", "ETHUSDT"]
        # BTC withdrawing (vel < -0.1), ETH building (vel > 0)
        snaps = {
            "BTCUSDT": [
                OBSnapshot(time.time(), "BTCUSDT", np.array([2000.0]*5), np.array([2000.0]*5), np.ones(5), np.ones(5)),
                OBSnapshot(time.time(), "BTCUSDT", np.array([1000.0]*5), np.array([1000.0]*5), np.ones(5), np.ones(5))
            ],
            "ETHUSDT": [
                OBSnapshot(time.time(), "ETHUSDT", np.array([1000.0]*5), np.array([1000.0]*5), np.ones(5), np.ones(5)),
                OBSnapshot(time.time(), "ETHUSDT", np.array([1200.0]*5), np.array([1200.0]*5), np.ones(5), np.ones(5))
            ]
        }
        self._snap_idx = 0
        def side_effect(asset, ts):
            # Closure over self._snap_idx selects which snapshot generation
            # the mocked provider serves.
            return snaps[asset][self._snap_idx]
        self.mock_provider.get_snapshot.side_effect = side_effect
        # Redundant re-set (already 0 above); harmless before the first step.
        self._snap_idx = 0
        self.engine.step_live(assets, bar_idx=0)
        self._snap_idx = 1
        self.engine.step_live(assets, bar_idx=1)
        macro = self.engine.get_macro(1)
        # BTC vel = (2000-4000)/4000 = -0.5
        # ETH vel = (2400-2000)/2000 = +0.2
        # cascade count should be 1 if threshold is -0.1
        self.assertEqual(macro.cascade_count, 1)
    def test_step_live_none_snapshot_skipped(self):
        """Test that None snapshots are skipped without error."""
        self.mock_provider.get_snapshot.return_value = None
        self.engine.step_live(["BTCUSDT"], bar_idx=20)
        self.assertEqual(self.engine._live_stale_count, 1)
    def test_step_live_stale_warning(self):
        """Test that stale count increments correctly."""
        self.mock_provider.get_snapshot.return_value = None
        for i in range(3):
            self.engine.step_live(["BTCUSDT"], bar_idx=i)
        self.assertEqual(self.engine._live_stale_count, 3)
    def test_step_live_cache_eviction(self):
        """Test that live caches are evicted after MAX_LIVE_CACHE entries."""
        asset = "BTCUSDT"
        snap = OBSnapshot(time.time(), asset, np.array([1000.0]*5), np.array([1000.0]*5), np.ones(5), np.ones(5))
        self.mock_provider.get_snapshot.return_value = snap
        for i in range(505):
            self.engine.step_live([asset], bar_idx=i)
        # Eviction keeps only the newest 500 entries: bar 0 gone, 504 present.
        self.assertEqual(len(self.engine._live_placement[asset]), 500)
        self.assertNotIn(0, self.engine._live_placement[asset])
        self.assertIn(504, self.engine._live_placement[asset])
    def test_resolve_idx_live_mode(self):
        """Test index resolution in live mode."""
        self.engine._live_mode = True
        self.engine._live_placement["BTCUSDT"] = {10: MagicMock()}
        idx = self.engine._resolve_idx("BTCUSDT", 10.0)
        self.assertEqual(idx, 10)
    def test_resolve_idx_live_fallback(self):
        """Test fallback to latest bar in live mode."""
        self.engine._live_mode = True
        self.engine._live_placement["BTCUSDT"] = {10: MagicMock(), 15: MagicMock()}
        idx = self.engine._resolve_idx("BTCUSDT", 20.0)  # unknown bar
        self.assertEqual(idx, 15)
    def test_median_depth_ema(self):
        """Test that _median_depth_ref converges via EMA."""
        asset = "BTCUSDT"
        # Init with 2000
        snap1 = OBSnapshot(time.time(), asset, np.array([1000.0]*5), np.array([1000.0]*5), np.ones(5), np.ones(5))
        self.mock_provider.get_snapshot.return_value = snap1
        self.engine.step_live([asset], bar_idx=0)
        self.assertEqual(self.engine._median_depth_ref[asset], 2000.0)
        # Next value 4000
        snap2 = OBSnapshot(time.time(), asset, np.array([2000.0]*5), np.array([2000.0]*5), np.ones(5), np.ones(5))
        self.mock_provider.get_snapshot.return_value = snap2
        self.engine.step_live([asset], bar_idx=1)
        # 0.99 * 2000 + 0.01 * 4000 = 1980 + 40 = 2020
        self.assertAlmostEqual(self.engine._median_depth_ref[asset], 2020.0)
    def test_hz_ob_provider_timestamp_iso(self):
        """Test ISO string normalization in HZOBProvider."""
        provider = HZOBProvider()
        mock_imap = MagicMock()
        provider._imap = mock_imap
        iso_ts = "2026-03-26T12:00:00+00:00"
        # The ISO string carries +00:00, so replace(tzinfo=utc) is a no-op
        # here; it pins the expectation to UTC regardless.
        expected_ts = datetime.fromisoformat(iso_ts).replace(tzinfo=timezone.utc).timestamp()
        payload = json.dumps({
            "timestamp": iso_ts,
            "bid_notional": [1.0]*5, "ask_notional": [1.0]*5,
            "bid_depth": [1.0]*5, "ask_depth": [1.0]*5
        })
        mock_imap.get.return_value = payload
        snap = provider.get_snapshot("BTCUSDT", time.time())
        self.assertEqual(snap.timestamp, expected_ts)
    def test_hz_ob_provider_timestamp_float(self):
        """Test float timestamp pass-through in HZOBProvider."""
        provider = HZOBProvider()
        mock_imap = MagicMock()
        provider._imap = mock_imap
        float_ts = 1711454400.0
        payload = json.dumps({
            "timestamp": float_ts,
            "bid_notional": [1.0]*5, "ask_notional": [1.0]*5,
            "bid_depth": [1.0]*5, "ask_depth": [1.0]*5
        })
        mock_imap.get.return_value = payload
        snap = provider.get_snapshot("BTCUSDT", time.time())
        self.assertEqual(snap.timestamp, float_ts)
# Allow running this module directly, outside pytest/test discovery.
if __name__ == "__main__":
    unittest.main()

View File

@@ -0,0 +1,221 @@
"""
test_live_price_feed.py — Unit tests for live_price_feed module.
Tests cover all three implementations (NullPriceFeed, ConstantPriceFeed,
NautilusCachePriceFeed) and verify Protocol structural conformance.
"""
import sys
import unittest
from unittest.mock import MagicMock
sys.path.insert(0, 'nautilus_dolphin')
from nautilus_dolphin.nautilus.live_price_feed import (
PriceFeed,
NullPriceFeed,
ConstantPriceFeed,
NautilusCachePriceFeed,
)
class TestNullPriceFeed(unittest.TestCase):
    """A NullPriceFeed answers every price query with None and timestamp 0."""

    def setUp(self):
        self.feed = NullPriceFeed()

    def test_bid_returns_none(self):
        quote = self.feed.bid('BTCUSDT')
        self.assertIsNone(quote)

    def test_ask_returns_none(self):
        quote = self.feed.ask('BTCUSDT')
        self.assertIsNone(quote)

    def test_mid_returns_none(self):
        quote = self.feed.mid('BTCUSDT')
        self.assertIsNone(quote)

    def test_last_update_ns_returns_zero(self):
        ts = self.feed.last_update_ns('BTCUSDT')
        self.assertEqual(ts, 0)

    def test_unknown_symbol_returns_none(self):
        quote = self.feed.bid('UNKNOWNSYM')
        self.assertIsNone(quote)

    def test_satisfies_price_feed_protocol(self):
        # Runtime structural check against the PriceFeed Protocol.
        self.assertIsInstance(self.feed, PriceFeed)
class TestConstantPriceFeed(unittest.TestCase):
    """ConstantPriceFeed serves fixed bid/ask pairs and supports mutation."""

    BTC_BID, BTC_ASK = 82000.0, 82010.0
    ETH_BID, ETH_ASK = 1595.5, 1596.0

    def setUp(self):
        book = {
            'BTCUSDT': (self.BTC_BID, self.BTC_ASK),
            'ETHUSDT': (self.ETH_BID, self.ETH_ASK),
        }
        self.feed = ConstantPriceFeed(book)

    def test_bid_known_symbol(self):
        self.assertAlmostEqual(self.feed.bid('BTCUSDT'), self.BTC_BID)

    def test_ask_known_symbol(self):
        self.assertAlmostEqual(self.feed.ask('BTCUSDT'), self.BTC_ASK)

    def test_mid_known_symbol(self):
        midpoint = (self.BTC_BID + self.BTC_ASK) / 2.0
        self.assertAlmostEqual(self.feed.mid('BTCUSDT'), midpoint)

    def test_bid_second_symbol(self):
        self.assertAlmostEqual(self.feed.bid('ETHUSDT'), self.ETH_BID)

    def test_ask_second_symbol(self):
        self.assertAlmostEqual(self.feed.ask('ETHUSDT'), self.ETH_ASK)

    def test_unknown_symbol_bid_returns_none(self):
        self.assertIsNone(self.feed.bid('SOLUSDT'))

    def test_unknown_symbol_ask_returns_none(self):
        self.assertIsNone(self.feed.ask('SOLUSDT'))

    def test_unknown_symbol_mid_returns_none(self):
        self.assertIsNone(self.feed.mid('SOLUSDT'))

    def test_last_update_ns_returns_zero(self):
        self.assertEqual(self.feed.last_update_ns('BTCUSDT'), 0)

    def test_update_changes_price(self):
        self.feed.update('BTCUSDT', 81000.0, 81010.0)
        self.assertAlmostEqual(self.feed.bid('BTCUSDT'), 81000.0)
        self.assertAlmostEqual(self.feed.ask('BTCUSDT'), 81010.0)

    def test_update_new_symbol(self):
        self.feed.update('SOLUSDT', 120.0, 120.05)
        self.assertAlmostEqual(self.feed.bid('SOLUSDT'), 120.0)

    def test_remove_symbol(self):
        self.feed.remove('BTCUSDT')
        self.assertIsNone(self.feed.bid('BTCUSDT'))

    def test_remove_unknown_is_noop(self):
        # Removing a never-registered symbol must not raise.
        self.feed.remove('NONEXISTENT')

    def test_empty_feed_construction(self):
        empty = ConstantPriceFeed()
        self.assertIsNone(empty.bid('BTCUSDT'))

    def test_satisfies_price_feed_protocol(self):
        self.assertIsInstance(self.feed, PriceFeed)
class TestNautilusCachePriceFeed(unittest.TestCase):
    """NautilusCachePriceFeed behavior against a mocked Nautilus cache.

    Real InstrumentId parsing is bypassed: ``_patch_iid`` pre-populates the
    feed's IID cache with a mock whose ``str()`` matches the key the mocked
    cache resolves, so only the feed's own lookup logic is exercised.
    (A duplicate, never-used ``_make_iid`` helper was removed.)
    """
    def _make_quote_tick(self, bid: float, ask: float, ts_event: int = 1234567890_000_000_000):
        """Build a mock quote tick carrying bid/ask prices and an event ts."""
        qt = MagicMock()
        qt.bid_price = bid
        qt.ask_price = ask
        qt.ts_event = ts_event
        return qt
    def _make_cache(self, quote_ticks: dict):
        """
        Build a mock NT cache where quote_tick(iid) returns the given tick
        if iid.value is in quote_ticks, else None.
        """
        cache = MagicMock()
        def _quote_tick(iid):
            # dict.get yields None for unknown instruments, mirroring the
            # real cache's "no quote yet" behavior.
            return quote_ticks.get(str(iid))
        cache.quote_tick.side_effect = _quote_tick
        return cache
    def setUp(self):
        btc_tick = self._make_quote_tick(82000.0, 82010.0, ts_event=9_999_999)
        eth_tick = self._make_quote_tick(1595.5, 1596.0, ts_event=8_888_888)
        self.cache = self._make_cache({
            'BTCUSDT.BINANCE': btc_tick,
            'ETHUSDT.BINANCE': eth_tick,
        })
        self.feed = NautilusCachePriceFeed(self.cache, venue='BINANCE')
    def _patch_iid(self, feed, symbol, venue):
        """Pre-populate the IID cache so the mock cache key matches."""
        iid = MagicMock()
        iid.__str__ = lambda s: f'{symbol}.{venue}'
        feed._iid_cache[symbol] = iid
    def test_bid_returns_float(self):
        self._patch_iid(self.feed, 'BTCUSDT', 'BINANCE')
        self.assertAlmostEqual(self.feed.bid('BTCUSDT'), 82000.0)
    def test_ask_returns_float(self):
        self._patch_iid(self.feed, 'BTCUSDT', 'BINANCE')
        self.assertAlmostEqual(self.feed.ask('BTCUSDT'), 82010.0)
    def test_mid_is_average(self):
        self._patch_iid(self.feed, 'BTCUSDT', 'BINANCE')
        expected = (82000.0 + 82010.0) / 2.0
        self.assertAlmostEqual(self.feed.mid('BTCUSDT'), expected)
    def test_no_quote_returns_none(self):
        # SOLUSDT is not in the mocked cache, so every accessor must be None.
        self._patch_iid(self.feed, 'SOLUSDT', 'BINANCE')
        self.assertIsNone(self.feed.bid('SOLUSDT'))
        self.assertIsNone(self.feed.ask('SOLUSDT'))
        self.assertIsNone(self.feed.mid('SOLUSDT'))
    def test_last_update_ns_returns_ts_event(self):
        self._patch_iid(self.feed, 'BTCUSDT', 'BINANCE')
        self.assertEqual(self.feed.last_update_ns('BTCUSDT'), 9_999_999)
    def test_last_update_ns_no_quote_returns_zero(self):
        self._patch_iid(self.feed, 'SOLUSDT', 'BINANCE')
        self.assertEqual(self.feed.last_update_ns('SOLUSDT'), 0)
    def test_iid_cache_populated_on_first_call(self):
        """IID cache should store parsed InstrumentId after first access."""
        # Pre-patch so mock cache resolves correctly
        self._patch_iid(self.feed, 'ETHUSDT', 'BINANCE')
        _ = self.feed.bid('ETHUSDT')
        self.assertIn('ETHUSDT', self.feed._iid_cache)
    def test_satisfies_price_feed_protocol(self):
        self.assertIsInstance(self.feed, PriceFeed)
class TestPriceFeedProtocol(unittest.TestCase):
    """Verify all implementations satisfy the PriceFeed Protocol at runtime."""

    def test_null_is_price_feed(self):
        feed = NullPriceFeed()
        self.assertIsInstance(feed, PriceFeed)

    def test_constant_is_price_feed(self):
        feed = ConstantPriceFeed()
        self.assertIsInstance(feed, PriceFeed)

    def test_nautilus_is_price_feed(self):
        cache_feed = NautilusCachePriceFeed(MagicMock())
        self.assertIsInstance(cache_feed, PriceFeed)

    def test_custom_class_satisfying_protocol(self):
        """Any class with the right methods satisfies PriceFeed structurally."""
        class MinimalFeed:
            def bid(self, s):
                return 1.0

            def ask(self, s):
                return 1.0

            def mid(self, s):
                return 1.0

            def last_update_ns(self, s):
                return 0

        self.assertIsInstance(MinimalFeed(), PriceFeed)

    def test_incomplete_class_does_not_satisfy_protocol(self):
        """A class missing methods should NOT satisfy PriceFeed."""
        class IncompleteFeed:
            # only bid; ask/mid/last_update_ns deliberately absent
            def bid(self, s):
                return 1.0

        self.assertNotIsInstance(IncompleteFeed(), PriceFeed)
# Allow running this module directly, outside pytest/test discovery.
if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,212 @@
"""Tests for MetricsMonitor."""
import pytest
from datetime import datetime
from unittest.mock import Mock
from nautilus_dolphin.nautilus.metrics_monitor import (
MetricsMonitor,
ThresholdConfig,
Alert
)
class TestMetricsMonitor:
    """Unit tests for MetricsMonitor: fill/trade counters, derived rates,
    threshold alerting, deduplication, handlers, and export formats."""
    def test_record_fill_updates_counters(self):
        """Test fill recording updates maker/taker counters."""
        monitor = MetricsMonitor()
        monitor.record_fill('maker', 0.5)
        monitor.record_fill('maker', 0.3)
        monitor.record_fill('taker', 1.2)
        assert monitor._maker_fills == 2
        assert monitor._taker_fills == 1
    def test_get_maker_fill_rate(self):
        """Test maker fill rate calculation."""
        monitor = MetricsMonitor()
        # Default when no fills
        assert monitor.get_maker_fill_rate() == 100.0
        # After some fills
        monitor.record_fill('maker', 0.5)
        monitor.record_fill('maker', 0.3)
        monitor.record_fill('taker', 1.2)
        assert monitor.get_maker_fill_rate() == (2/3) * 100
    def test_get_average_slippage(self):
        """Test average slippage calculation."""
        monitor = MetricsMonitor()
        # Default when no data
        assert monitor.get_average_slippage() == 0.0
        # After fills
        monitor.record_fill('maker', 2.0)
        monitor.record_fill('taker', 4.0)
        monitor.record_fill('maker', 6.0)
        assert monitor.get_average_slippage() == 4.0
    def test_record_trade_result_updates_pnl(self):
        """Test trade result recording updates P&L."""
        monitor = MetricsMonitor()
        monitor.record_trade_result(100.0)
        monitor.record_trade_result(-50.0)
        monitor.record_trade_result(200.0)
        assert monitor._total_pnl == 250.0
        assert monitor._winning_trades == 2
        assert monitor._total_trades == 3
    def test_get_win_rate(self):
        """Test win rate calculation."""
        monitor = MetricsMonitor()
        # Default when no trades
        assert monitor.get_win_rate() == 0.0
        monitor.record_trade_result(100.0)
        monitor.record_trade_result(200.0)
        monitor.record_trade_result(-50.0)
        assert monitor.get_win_rate() == (2/3) * 100
    def test_get_profit_factor(self):
        """Test profit factor calculation."""
        monitor = MetricsMonitor()
        monitor.record_trade_result(100.0)
        monitor.record_trade_result(200.0)
        monitor.record_trade_result(-50.0)
        # Profit factor = gross wins / gross losses.
        assert monitor.get_profit_factor() == 300.0 / 50.0
    def test_profit_factor_no_losses(self):
        """Test profit factor when no losses."""
        monitor = MetricsMonitor()
        monitor.record_trade_result(100.0)
        assert monitor.get_profit_factor() == float('inf')
    def test_alert_on_low_maker_fill_rate(self):
        """Test alert raised when maker fill rate drops below threshold."""
        config = ThresholdConfig(
            critical_maker_fill_rate=50.0,
            warning_maker_fill_rate=60.0
        )
        monitor = MetricsMonitor(config)
        # Add many taker fills to drop maker rate below threshold
        for _ in range(10):
            monitor.record_fill('taker', 1.0)
        # Check alert was raised
        alerts = [a for a in monitor._alerts if a.metric == 'maker_fill_rate']
        assert len(alerts) > 0
        assert alerts[0].level == 'critical'
    def test_alert_on_high_slippage(self):
        """Test alert raised when slippage exceeds threshold."""
        config = ThresholdConfig(
            warning_slippage_bps=3.0,
            critical_slippage_bps=5.0
        )
        monitor = MetricsMonitor(config)
        # 10 bps is above the 5 bps critical line.
        monitor.record_fill('taker', 10.0)
        # Check alert was raised
        alerts = [a for a in monitor._alerts if a.metric == 'slippage']
        assert len(alerts) > 0
        assert alerts[0].level == 'critical'
    def test_alert_deduplication(self):
        """Test alerts are deduplicated within 5-minute window."""
        config = ThresholdConfig(critical_maker_fill_rate=50.0)
        monitor = MetricsMonitor(config)
        # Add many taker fills
        for _ in range(20):
            monitor.record_fill('taker', 1.0)
        # Should only have 1 alert (deduplicated)
        alerts = [a for a in monitor._alerts if a.metric == 'maker_fill_rate']
        assert len(alerts) == 1
    def test_add_alert_handler(self):
        """Test custom alert handler registration."""
        monitor = MetricsMonitor()
        handler = Mock()
        monitor.add_alert_handler(handler)
        # Trigger an alert
        config = ThresholdConfig(critical_maker_fill_rate=50.0)
        # NOTE(review): config is swapped in after construction; this assumes
        # the monitor reads self.config at threshold-check time — confirm.
        monitor.config = config
        for _ in range(10):
            monitor.record_fill('taker', 1.0)
        # Handler should have been called
        assert handler.called
    def test_get_metrics_summary(self):
        """Test metrics summary report."""
        monitor = MetricsMonitor()
        monitor.record_fill('maker', 0.5)
        monitor.record_trade_result(100.0)
        summary = monitor.get_metrics_summary()
        assert 'maker_fill_rate_pct' in summary
        assert 'average_slippage_bps' in summary
        assert 'total_trades' in summary
        assert 'win_rate_pct' in summary
        assert 'profit_factor' in summary
        assert 'total_pnl' in summary
    def test_prometheus_export(self):
        """Test Prometheus format export."""
        monitor = MetricsMonitor()
        monitor.record_fill('maker', 0.5)
        monitor.record_trade_result(100.0)
        prometheus = monitor.get_prometheus_metrics()
        assert 'dolphin_maker_fill_rate_pct' in prometheus
        assert 'dolphin_slippage_bps' in prometheus
        assert 'dolphin_total_trades' in prometheus
        assert 'dolphin_win_rate_pct' in prometheus
        assert 'dolphin_profit_factor' in prometheus
        assert 'dolphin_total_pnl' in prometheus
class TestThresholdConfig:
    """Defaults and overrides of the alerting threshold configuration."""

    def test_default_thresholds(self):
        """A bare config must carry the documented default thresholds."""
        defaults = ThresholdConfig()
        assert defaults.min_maker_fill_rate == 48.0
        assert defaults.max_slippage_bps == 5.0
        assert defaults.critical_maker_fill_rate == 48.0
        assert defaults.warning_slippage_bps == 3.0

    def test_custom_thresholds(self):
        """Explicit keyword overrides must land on the matching fields."""
        overrides = dict(
            min_maker_fill_rate=60.0,
            max_slippage_bps=3.0,
            critical_maker_fill_rate=55.0,
        )
        cfg = ThresholdConfig(**overrides)
        assert cfg.min_maker_fill_rate == 60.0
        assert cfg.max_slippage_bps == 3.0
        assert cfg.critical_maker_fill_rate == 55.0

View File

@@ -0,0 +1,513 @@
"""
CRITICAL TEST: Nautilus-Dolphin vs Standalone DOLPHIN Comparison
================================================================
This test verifies that Nautilus-Dolphin produces IDENTICAL results
to the standalone DOLPHIN implementation (itest_v7.py).
MUST MATCH:
- Trade count
- Win rate
- Profit factor
- ROI
- Entry/exit prices
- P&L per trade
- Exit types
"""
import json
import pytest
from pathlib import Path
from typing import Dict, List, Any
from dataclasses import dataclass
# ── Configuration ────────────────────────────────────────────────────────────
# Match the itest_v7 "tight_3_3" strategy configuration
REFERENCE_STRATEGY = "tight_3_3"
# Reference artifacts written by the standalone itest_v7 run; resolved three
# parents up from this test file.
REFERENCE_RESULTS_FILE = Path(__file__).parent.parent.parent / "itest_v7_results.json"
REFERENCE_TRADES_FILE = Path(__file__).parent.parent.parent / "itest_v7_trades.jsonl"
TOLERANCE_PCT = 0.001  # 0.1% tolerance for floating point differences
@dataclass
class Trade:
    """Trade record for comparison.

    Mirrors one line of the itest_v7 trades JSONL; field names match the
    JSON keys so records are built directly from each parsed line.
    """
    # Identity / provenance
    strategy: str
    date: str
    scan_idx: int
    # Execution details
    direction: str
    entry_price: float
    exit_price: float
    exit_type: str
    bars_held: int
    leverage: float
    notional: float
    # Economics — expected to satisfy net_pnl == gross_pnl - fees and
    # is_winner == (net_pnl > 0), as checked in the consistency test.
    gross_pnl: float
    fees: float
    net_pnl: float
    is_winner: bool
    trade_asset: str  # symbol the trade was taken on
@dataclass
class StrategyMetrics:
    """Strategy metrics for comparison.

    Aggregate per-strategy figures as loaded from the reference results JSON.
    """
    name: str
    # Headline performance
    capital: float
    roi_pct: float
    trades: int
    wins: int
    win_rate: float
    profit_factor: float
    avg_win: float
    avg_loss: float
    # Exit-type breakdown; these four are expected to sum to `trades`.
    stop_exits: int
    trailing_exits: int
    target_exits: int
    hold_exits: int
# Global storage for loaded data
# Memoization slots: populated once by load_reference_data() and reused for
# the whole test session.
_ref_results = None
_ref_trades = None
def load_reference_data():
    """Load (and memoize) the standalone DOLPHIN reference artifacts.

    Returns:
        tuple: ``(results, trades)`` where either element is ``None`` when
        the corresponding reference file does not exist, letting callers
        ``pytest.skip`` gracefully. Trades are filtered to
        ``REFERENCE_STRATEGY`` only.
    """
    global _ref_results, _ref_trades
    if _ref_results is None:
        if REFERENCE_RESULTS_FILE.exists():
            with open(REFERENCE_RESULTS_FILE, 'r') as f:
                _ref_results = json.load(f)
    if _ref_trades is None:
        if REFERENCE_TRADES_FILE.exists():
            _ref_trades = []
            with open(REFERENCE_TRADES_FILE, 'r') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        # JSONL files commonly end with a trailing newline;
                        # a blank line would crash json.loads, so skip it.
                        continue
                    data = json.loads(line)
                    if data.get('strategy') == REFERENCE_STRATEGY:
                        _ref_trades.append(Trade(
                            strategy=data['strategy'],
                            date=data['date'],
                            scan_idx=data['scan_idx'],
                            direction=data['direction'],
                            entry_price=data['entry_price'],
                            exit_price=data['exit_price'],
                            exit_type=data['exit_type'],
                            bars_held=data['bars_held'],
                            leverage=data['leverage'],
                            notional=data['notional'],
                            gross_pnl=data['gross_pnl'],
                            fees=data['fees'],
                            net_pnl=data['net_pnl'],
                            is_winner=data['is_winner'],
                            trade_asset=data['trade_asset']
                        ))
    return _ref_results, _ref_trades
class TestNDvsStandaloneComparison:
"""Test Nautilus-Dolphin matches standalone DOLPHIN results."""
def test_reference_results_exist(self):
"""Verify reference results file exists and has expected structure."""
reference_results, _ = load_reference_data()
if reference_results is None:
pytest.skip(f"Reference results not found: {REFERENCE_RESULTS_FILE}")
assert 'strategies' in reference_results
assert REFERENCE_STRATEGY in reference_results['strategies']
assert 'total_scans' in reference_results
print(f"\nReference data loaded: {reference_results['total_scans']} scans")
def test_reference_trades_exist(self):
"""Verify reference trades exist for the strategy."""
_, reference_trades = load_reference_data()
if reference_trades is None:
pytest.skip(f"Reference trades not found: {REFERENCE_TRADES_FILE}")
assert len(reference_trades) > 0
print(f"\nReference trades loaded: {len(reference_trades)} trades for {REFERENCE_STRATEGY}")
    def test_strategy_metrics_match(self):
        """Verify ND produces matching high-level metrics.
        This test compares:
        - Trade count
        - Win rate
        - Profit factor
        - ROI
        """
        # NOTE(review): despite the docstring, no ND-side metrics are computed
        # here yet — the test loads the reference metrics, logs them, and only
        # sanity-checks the reference data itself. The actual ND comparison
        # still needs to be wired in.
        reference_results, _ = load_reference_data()
        if reference_results is None:
            pytest.skip("Reference results not available")
        ref_strategy = reference_results['strategies'][REFERENCE_STRATEGY]
        # Store reference metrics for comparison
        ref_metrics = StrategyMetrics(
            name=REFERENCE_STRATEGY,
            capital=ref_strategy['capital'],
            roi_pct=ref_strategy['roi_pct'],
            trades=ref_strategy['trades'],
            wins=ref_strategy['wins'],
            win_rate=ref_strategy['win_rate'],
            profit_factor=ref_strategy['profit_factor'],
            avg_win=ref_strategy['avg_win'],
            avg_loss=ref_strategy['avg_loss'],
            stop_exits=ref_strategy['stop_exits'],
            trailing_exits=ref_strategy['trailing_exits'],
            target_exits=ref_strategy['target_exits'],
            hold_exits=ref_strategy['hold_exits']
        )
        # Log reference metrics
        print(f"\n{'='*60}")
        print(f"Reference Strategy: {REFERENCE_STRATEGY}")
        print(f"{'='*60}")
        print(f"Capital: ${ref_metrics.capital:,.2f}")
        print(f"ROI: {ref_metrics.roi_pct:.2f}%")
        print(f"Trades: {ref_metrics.trades}")
        print(f"Win Rate: {ref_metrics.win_rate:.2f}%")
        print(f"Profit Factor: {ref_metrics.profit_factor:.4f}")
        print(f"Avg Win: ${ref_metrics.avg_win:.2f}")
        print(f"Avg Loss: ${ref_metrics.avg_loss:.2f}")
        print(f"Exit Types: stop={ref_metrics.stop_exits}, trail={ref_metrics.trailing_exits}, target={ref_metrics.target_exits}, hold={ref_metrics.hold_exits}")
        # Basic sanity checks on reference data
        assert ref_metrics.trades > 100, "Expected significant number of trades"
        assert 0 < ref_metrics.win_rate < 100, "Win rate should be between 0-100%"
        assert ref_metrics.capital > 0, "Capital should be positive"
def test_trade_details_structure(self):
"""Verify structure of reference trades."""
_, reference_trades = load_reference_data()
if not reference_trades:
pytest.skip("No reference trades loaded")
trade = reference_trades[0]
# Check required fields exist
assert trade.strategy == REFERENCE_STRATEGY
assert trade.entry_price > 0
assert trade.exit_price > 0
assert trade.notional > 0
assert trade.exit_type in ['trailing_stop', 'stop_loss', 'target', 'max_hold']
print(f"\nSample trade: {trade.trade_asset} {trade.direction}")
print(f" Date: {trade.date}, Scan: {trade.scan_idx}")
print(f" Entry: ${trade.entry_price:.2f} -> Exit: ${trade.exit_price:.2f}")
print(f" P&L: ${trade.net_pnl:.4f}, Exit Type: {trade.exit_type}")
print(f" Bars: {trade.bars_held}, Leverage: {trade.leverage}x")
def test_exit_type_distribution(self):
"""Verify exit type distribution matches expectations."""
reference_results, _ = load_reference_data()
if reference_results is None:
pytest.skip("Reference results not available")
ref_strategy = reference_results['strategies'][REFERENCE_STRATEGY]
total_exits = (
ref_strategy['stop_exits'] +
ref_strategy['trailing_exits'] +
ref_strategy['target_exits'] +
ref_strategy['hold_exits']
)
assert total_exits == ref_strategy['trades'], "Exit count should match trade count"
# Log distribution
print(f"\nExit Type Distribution:")
print(f" Trailing: {ref_strategy['trailing_exits']} ({100*ref_strategy['trailing_exits']/ref_strategy['trades']:.1f}%)")
print(f" Stop: {ref_strategy['stop_exits']} ({100*ref_strategy['stop_exits']/ref_strategy['trades']:.1f}%)")
print(f" Target: {ref_strategy['target_exits']} ({100*ref_strategy['target_exits']/ref_strategy['trades']:.1f}%)")
print(f" Hold: {ref_strategy['hold_exits']} ({100*ref_strategy['hold_exits']/ref_strategy['trades']:.1f}%)")
def test_pnl_calculation_consistency(self):
"""Verify P&L calculations in reference trades are consistent.
Checks: gross_pnl - fees = net_pnl (within tolerance)
"""
_, reference_trades = load_reference_data()
if not reference_trades:
pytest.skip("No reference trades loaded")
calc_errors = []
winner_errors = []
for i, trade in enumerate(reference_trades[:100]): # Check first 100
# Check 1: Verify gross_pnl - fees = net_pnl
calc_net = trade.gross_pnl - trade.fees
if abs(calc_net - trade.net_pnl) > 0.01:
calc_errors.append(i)
# Check 2: Verify is_winner matches net_pnl sign
# A trade is a winner if net_pnl > 0 (strictly positive)
expected_winner = trade.net_pnl > 0
if expected_winner != trade.is_winner:
winner_errors.append(i)
# Report findings
print(f"\nP&L Calculation Check (first 100 trades):")
print(f" Calculation errors: {len(calc_errors)} ({len(calc_errors)}%)")
print(f" Winner flag errors: {len(winner_errors)} ({len(winner_errors)}%)")
if calc_errors[:5]:
print(f" Sample calc errors: {calc_errors[:5]}")
# The key check: gross_pnl - fees should equal net_pnl
# Some small discrepancies are acceptable due to rounding
calc_error_rate = len(calc_errors) / min(100, len(reference_trades))
assert calc_error_rate < 0.05, f"Too many P&L calculation errors: {calc_error_rate:.1%}"
def test_nd_configuration_matches(self):
"""Verify ND configuration matches standalone.
This test ensures the Nautilus-Dolphin configuration
matches the itest_v7 tight_3_3 configuration.
"""
from nautilus_dolphin.nautilus.strategy_registration import DolphinStrategyConfig
# ND configuration
nd_config = DolphinStrategyConfig(
venue="BINANCE_FUTURES",
max_leverage=2.5, # From itest_v7
capital_fraction=0.15, # From itest_v7
tp_bps=99, # ~1% target (not heavily used in tight_3_3)
max_hold_bars=120, # From itest_v7
acb_enabled=True,
)
# Key parameters that MUST match itest_v7
assert nd_config.max_leverage == 2.5, "Leverage must match"
assert nd_config.capital_fraction == 0.15, "Capital fraction must match"
assert nd_config.max_hold_bars == 120, "Max hold must match"
print(f"\nND Configuration validated:")
print(f" Max Leverage: {nd_config.max_leverage}x")
print(f" Capital Fraction: {nd_config.capital_fraction}")
print(f" Max Hold Bars: {nd_config.max_hold_bars}")
print(f" ACB Enabled: {nd_config.acb_enabled}")
class TestNDSignalGenerationStack:
    """Test Nautilus-Dolphin signal generation stack works correctly."""

    def test_data_adapter_imports(self):
        """Verify data adapter components import correctly."""
        from nautilus_dolphin.nautilus.data_adapter import (
            BacktestDataLoader,
            JSONEigenvalueDataAdapter,
        )
        for component in (JSONEigenvalueDataAdapter, BacktestDataLoader):
            assert component is not None

    def test_data_catalog_imports(self):
        """Verify data catalog components import correctly."""
        from nautilus_dolphin.nautilus.data_catalogue import (
            BacktestEngineConfig,
            DataCatalogueConfig,
            DataImporter,
        )
        for component in (DataCatalogueConfig, BacktestEngineConfig, DataImporter):
            assert component is not None

    def test_strategy_can_calculate_position_size(self):
        """Verify strategy can calculate position sizes matching itest_v7."""
        from nautilus_dolphin.nautilus.strategy import DolphinExecutionStrategyForTesting

        strategy = DolphinExecutionStrategyForTesting({
            'venue': 'BINANCE_FUTURES',
            'max_leverage': 2.5,
            'capital_fraction': 0.15,
            'acb_enabled': False,  # Disable ACB for this test
        })
        # Signal mirroring itest_v7 parameters
        test_signal = {
            'strength': 0.75,
            'bucket_boost': 1.0,
            'streak_mult': 1.0,
            'trend_mult': 1.0,
        }
        balance = 10000.0
        notional = strategy.calculate_position_size(test_signal, balance)
        # itest_v7: notional = 10000 * 0.15 * 2.5 = 3750
        expected_base = balance * 0.15 * 2.5
        print(f"\nPosition Size Calculation:")
        print(f"  Account: ${balance:,.2f}")
        print(f"  Calculated Notional: ${notional:,.2f}")
        print(f"  Expected (itest_v7): ${expected_base:,.2f}")
        # Allow for minor differences due to ACB or other factors
        assert notional > 0, "Notional must be positive"
        assert notional <= balance * 0.5, "Notional should respect sanity cap"

    def test_strategy_filters_match(self):
        """Verify strategy filters match itest_v7 logic."""
        from nautilus_dolphin.nautilus.strategy import DolphinExecutionStrategyForTesting

        strategy = DolphinExecutionStrategyForTesting({
            'venue': 'BINANCE_FUTURES',
            'irp_alignment_min': 0.45,
            'momentum_magnitude_min': 0.000075,
            'excluded_assets': ['TUSDUSDT', 'USDCUSDT'],
            'max_concurrent_positions': 10,
        })
        base_signal = {
            'irp_alignment': 0.5,
            'direction_confirm': True,
            'lookback_momentum': 0.0001,
        }
        # Manually force the volatility detector into the high regime
        strategy.volatility_detector._regime = 'high'
        strategy.volatility_detector._history = [0.0001] * 150
        # Signal for an allowed asset should pass the filters
        result = strategy._should_trade({**base_signal, 'asset': 'BTCUSDT'})
        print(f"\nValid signal check: '{result}'")
        # The same signal on an excluded asset must be rejected
        result_excluded = strategy._should_trade({**base_signal, 'asset': 'USDCUSDT'})
        print(f"Excluded asset check: '{result_excluded}'")
        assert result_excluded == "asset_excluded", "Should reject excluded asset"
class TestTradeByTradeComparison:
    """Trade-by-trade comparison between ND and standalone.

    This is the MOST CRITICAL test - every trade must match.
    """

    def test_first_10_trades_structure(self):
        """Verify structure of first 10 reference trades."""
        _, reference_trades = load_reference_data()
        if not reference_trades:
            pytest.skip("No reference trades loaded")
        banner = '=' * 60
        print(f"\n{banner}")
        print("First 10 Reference Trades:")
        print(f"{banner}")
        for idx, tr in enumerate(reference_trades[:10], start=1):
            print(f"\nTrade {idx}: {tr.trade_asset} {tr.direction}")
            print(f"  Date: {tr.date}, Scan: {tr.scan_idx}")
            print(f"  Entry: ${tr.entry_price:.2f} -> Exit: ${tr.exit_price:.2f}")
            print(f"  P&L: ${tr.net_pnl:.4f}, Exit: {tr.exit_type}")
            print(f"  Bars: {tr.bars_held}, Leverage: {tr.leverage}x")

    def test_entry_exit_prices_are_reasonable(self):
        """Verify entry/exit prices are within reasonable ranges."""
        _, reference_trades = load_reference_data()
        if not reference_trades:
            pytest.skip("No reference trades loaded")
        # Rough sanity bands for well-known assets
        price_bands = {
            'BTCUSDT': (20000, 100000),
            'ETHUSDT': (1000, 5000),
            'ADAUSDT': (0.2, 2.0),
            'SOLUSDT': (10, 200),
        }
        unreasonable = 0
        for tr in reference_trades[:100]:
            # Prices must be strictly positive
            if tr.entry_price <= 0 or tr.exit_price <= 0:
                unreasonable += 1
                continue
            # For known assets, entry must fall inside the sanity band
            band = price_bands.get(tr.trade_asset)
            if band is not None:
                lo, hi = band
                if not (lo <= tr.entry_price <= hi):
                    unreasonable += 1
        error_rate = unreasonable / min(100, len(reference_trades))
        assert error_rate < 0.1, f"Too many unreasonable prices: {error_rate:.1%}"

    def test_leverage_is_consistent(self):
        """Verify all trades use expected leverage."""
        _, reference_trades = load_reference_data()
        if not reference_trades:
            pytest.skip("No reference trades loaded")
        leverages = {tr.leverage for tr in reference_trades}
        print(f"\nLeverage values used: {leverages}")
        # itest_v7 uses 2.5x leverage for tight_3_3
        assert 2.5 in leverages, "Expected 2.5x leverage in trades"

    def test_fees_are_calculated(self):
        """Verify fees are calculated for all trades."""
        _, reference_trades = load_reference_data()
        if not reference_trades:
            pytest.skip("No reference trades loaded")
        trades_with_fees = sum(tr.fees > 0 for tr in reference_trades)
        fee_rate = trades_with_fees / len(reference_trades)
        print(f"\nFee coverage: {trades_with_fees}/{len(reference_trades)} ({fee_rate:.1%})")
        # All trades should have fees
        assert fee_rate > 0.99, "Expected fees on almost all trades"
# ── Main Comparison Test ─────────────────────────────────────────────────────
@pytest.mark.skip(reason="Full ND backtest comparison - run after ND backtest implementation")
class TestFullNDvsStandaloneBacktest:
    """Full backtest comparison - requires ND backtest results.

    Skipped until the ND backtest runner exists; the methods below are
    placeholders outlining the comparison plan.
    """

    def test_nd_backtest_produces_results(self):
        """Verify ND backtest runs and produces results."""
        # TODO: Run ND backtest and load results
        pass

    def test_trade_count_matches(self):
        """Verify ND produces same number of trades."""
        reference_results, _ = load_reference_data()
        if reference_results is None:
            pytest.skip("Reference results not available")
        # Reference trade count (unused until the ND comparison is implemented)
        ref_trades = reference_results['strategies'][REFERENCE_STRATEGY]['trades']
        # TODO: Compare with ND results
        pass

    def test_trade_by_trade_match(self):
        """CRITICAL: Verify every trade matches."""
        _, reference_trades = load_reference_data()
        if not reference_trades:
            pytest.skip("Reference trades not available")
        # TODO: Implement trade-by-trade comparison
        pass

View File

@@ -0,0 +1,111 @@
"""Tests for PositionManager."""
import pytest
from unittest.mock import Mock
from nautilus_dolphin.nautilus.position_manager import PositionManager
from nautilus_trader.model.enums import PositionSide
class TestPositionManager:

    @staticmethod
    def _make_position(entry=50000.0):
        """Build a Mock SHORT position with the attributes shared by all tests."""
        position = Mock()
        position.id = "POS-001"
        position.instrument_id = "BTCUSDT.BINANCE"
        position.avg_px_open = entry
        position.side = PositionSide.SHORT
        return position

    def test_on_position_opened_calculates_tp(self):
        """Test TP price calculation on position open."""
        strategy = Mock()
        pm = PositionManager(strategy, tp_bps=99, max_hold_bars=120)
        pm.on_position_opened(self._make_position())
        assert "BTCUSDT.BINANCE" in pm.positions
        metadata = pm.positions["BTCUSDT.BINANCE"]
        # TP should be entry * (1 - 99/10000) = 50000 * 0.9901
        assert abs(metadata['tp_price'] - 50000.0 * 0.9901) < 0.01

    def test_on_bar_increments_bars_held(self):
        """Test bars_held counter increments."""
        strategy = Mock()
        strategy.cache = Mock()
        pm = PositionManager(strategy)
        position = self._make_position()
        position.is_closed = False
        pm.on_position_opened(position)
        strategy.cache.position.return_value = position
        bar = Mock()
        bar.instrument_id = "BTCUSDT.BINANCE"
        bar.close = 50000.0
        for expected_bars in (1, 2):
            pm.on_bar(bar)
            assert pm.positions["BTCUSDT.BINANCE"]['bars_held'] == expected_bars

    def test_tp_exit_triggered(self):
        """Test TP exit when price hits target."""
        strategy = Mock()
        strategy.cache = Mock()
        strategy.order_factory = Mock()
        pm = PositionManager(strategy, tp_bps=99)
        position = self._make_position()
        position.is_closed = False
        position.quantity = 1.0
        pm.on_position_opened(position)
        strategy.cache.position.return_value = position
        bar = Mock()
        bar.instrument_id = "BTCUSDT.BINANCE"
        bar.close = 49505.0  # Below TP
        pm.on_bar(bar)
        # Should have called order_factory.market
        strategy.order_factory.market.assert_called_once()

    def test_max_hold_exit_triggered(self):
        """Test max hold exit after 120 bars."""
        strategy = Mock()
        strategy.cache = Mock()
        strategy.order_factory = Mock()
        pm = PositionManager(strategy, max_hold_bars=3)
        position = self._make_position()
        position.is_closed = False
        position.quantity = 1.0
        pm.on_position_opened(position)
        strategy.cache.position.return_value = position
        bar = Mock()
        bar.instrument_id = "BTCUSDT.BINANCE"
        bar.close = 50100.0  # Above TP
        # Trigger max hold
        for _ in range(3):
            pm.on_bar(bar)
        # Should have called order_factory.market
        strategy.order_factory.market.assert_called_once()

View File

@@ -0,0 +1,552 @@
"""
Tests for proxy_boost_engine — unit + e2e regression.
Unit tests run fast (no backtest data required).
E2E tests are marked @pytest.mark.slow and run the full 55-day backtest
against the known gold results. Skip with: pytest -m "not slow"
"""
import math
import sys
from pathlib import Path
import numpy as np
import pytest
# Make sure the project root is on sys.path so `nautilus_dolphin` and
# `exp_shared` (under dvae/) resolve when pytest is run from elsewhere.
_ROOT = Path(__file__).resolve().parent.parent
if str(_ROOT) not in sys.path:
    sys.path.insert(0, str(_ROOT))
from nautilus_dolphin.nautilus.proxy_boost_engine import (
BOOST_MODES,
DEFAULT_ALPHA,
DEFAULT_BOOST_MODE,
DEFAULT_THRESHOLD,
AdaptiveBoostEngine,
ExtendedLeverageEngine,
LiquidationGuardEngine,
ProxyBaseEngine,
create_boost_engine,
create_d_liq_engine,
D_LIQ_SOFT_CAP, D_LIQ_ABS_CAP, D_LIQ_MC_REF, D_LIQ_MARGIN_BUF,
)
from nautilus_dolphin.nautilus.esf_alpha_orchestrator import NDAlphaEngine
# ── Shared engine_kwargs (minimal valid set, no data needed) ─────────────────
# Used by every unit test below to construct engines without backtest data.
# NOTE(review): values are taken as-given; verify against
# exp_shared.ENGINE_KWARGS if the production config drifts.
_ENG_KW = dict(
    initial_capital=25000.0,
    vel_div_threshold=-0.02,
    vel_div_extreme=-0.05,
    min_leverage=0.5,
    max_leverage=5.0,
    leverage_convexity=3.0,
    fraction=0.2,
    fixed_tp_pct=0.0095,
    stop_pct=1.0,
    max_hold_bars=120,
    use_direction_confirm=False,
    use_asset_selection=False,
    use_sp_fees=False,
    use_sp_slippage=False,
    use_ob_edge=False,
    lookback=10,
    use_alpha_layers=False,
    use_dynamic_leverage=False,
    seed=42,  # deterministic construction for reproducible tests
)
# ═══════════════════════════════════════════════════════════════════════════════
# Unit tests
# ═══════════════════════════════════════════════════════════════════════════════
class TestCreateBoostEngine:
    """Factory behavior of create_boost_engine for every mode."""

    def test_none_returns_ndalphaengine(self):
        engine = create_boost_engine(mode='none', **_ENG_KW)
        assert type(engine) is NDAlphaEngine

    def test_default_returns_d_liq(self):
        engine = create_boost_engine(**_ENG_KW)
        assert isinstance(engine, LiquidationGuardEngine)
        assert engine.adaptive_beta is True
        assert engine._extended_soft_cap == D_LIQ_SOFT_CAP
        assert engine._extended_abs_cap == D_LIQ_ABS_CAP

    def test_fixed_mode(self):
        engine = create_boost_engine(mode='fixed', **_ENG_KW)
        assert isinstance(engine, AdaptiveBoostEngine)
        assert engine.adaptive_alpha is False
        assert engine.adaptive_thr is False
        assert engine.adaptive_beta is False

    def test_adaptive_alpha_mode(self):
        engine = create_boost_engine(mode='adaptive_alpha', **_ENG_KW)
        assert engine.adaptive_alpha is True
        assert engine.adaptive_thr is False
        assert engine.adaptive_beta is False

    def test_adaptive_thr_mode(self):
        engine = create_boost_engine(mode='adaptive_thr', **_ENG_KW)
        assert engine.adaptive_alpha is False
        assert engine.adaptive_thr is True
        assert engine.adaptive_beta is False

    def test_adaptive_both_mode(self):
        engine = create_boost_engine(mode='adaptive_both', **_ENG_KW)
        assert engine.adaptive_alpha is True
        assert engine.adaptive_thr is True
        assert engine.adaptive_beta is False

    def test_adaptive_beta_mode(self):
        engine = create_boost_engine(mode='adaptive_beta', **_ENG_KW)
        assert engine.adaptive_beta is True

    def test_invalid_mode_raises(self):
        with pytest.raises(ValueError, match="Unknown boost mode"):
            create_boost_engine(mode='bogus', **_ENG_KW)

    def test_all_valid_modes_construct(self):
        engines = [create_boost_engine(mode=mode, **_ENG_KW) for mode in BOOST_MODES]
        assert all(engine is not None for engine in engines)

    def test_d_liq_mode_returns_liquidation_guard_engine(self):
        engine = create_boost_engine(mode='d_liq', **_ENG_KW)
        assert isinstance(engine, LiquidationGuardEngine)
        assert engine._extended_soft_cap == D_LIQ_SOFT_CAP
        assert engine._extended_abs_cap == D_LIQ_ABS_CAP
        assert engine._mc_leverage_ref == D_LIQ_MC_REF
        assert engine.adaptive_beta is True

    def test_d_liq_overrides_max_leverage_kwarg(self):
        """max_leverage from engine_kwargs must be ignored for d_liq — caps are hardcoded."""
        overridden = dict(_ENG_KW, max_leverage=3.0)
        engine = create_boost_engine(mode='d_liq', **overridden)
        assert engine.base_max_leverage == D_LIQ_SOFT_CAP  # 8.0, not 3.0
        assert engine.abs_max_leverage == D_LIQ_ABS_CAP    # 9.0

    def test_d_liq_is_default_mode(self):
        assert DEFAULT_BOOST_MODE == 'd_liq'

    def test_d_liq_in_boost_modes(self):
        assert 'd_liq' in BOOST_MODES

    def test_custom_threshold_alpha(self):
        engine = create_boost_engine(mode='fixed', threshold=0.5, alpha=2.0, **_ENG_KW)
        assert engine.threshold == 0.5
        assert engine.alpha == 2.0

    def test_default_threshold_alpha(self):
        engine = create_boost_engine(mode='fixed', **_ENG_KW)
        assert engine.threshold == DEFAULT_THRESHOLD
        assert engine.alpha == DEFAULT_ALPHA
class TestProxyBaseEngine:

    def setup_method(self):
        self.engine = ProxyBaseEngine(**_ENG_KW)

    def test_initial_proxy_state(self):
        assert self.engine._current_proxy_b == 0.0
        assert self.engine._proxy_b_history == []

    def test_update_proxy(self):
        proxy_b = self.engine._update_proxy(inst=0.5, v750=0.3)
        assert proxy_b == pytest.approx(0.2)
        assert self.engine._current_proxy_b == pytest.approx(0.2)
        assert len(self.engine._proxy_b_history) == 1

    def test_prank_empty_history(self):
        assert self.engine._proxy_prank() == 0.5

    def test_prank_single_entry(self):
        self.engine._update_proxy(0.5, 0.3)  # pb = 0.2
        # Only one value in history; current value equals history[0], so
        # sum(v < 0.2 for v in [0.2]) / 1 = 0
        assert self.engine._proxy_prank() == 0.0

    def test_prank_ordering(self):
        # Feed values 0.1, 0.2, 0.3, 0.4 (current=0.4)
        for value in (0.1, 0.2, 0.3, 0.4):
            self.engine._update_proxy(value, 0.0)
        # prank(0.4) = count(v < 0.4 in [0.1,0.2,0.3,0.4]) / 4 = 3/4
        assert self.engine._proxy_prank() == pytest.approx(0.75)

    def test_history_truncation(self):
        for value in range(600):
            self.engine._update_proxy(float(value), 0.0)
        # History is capped at the 500 most recent entries
        assert len(self.engine._proxy_b_history) == 500

    def test_pre_bar_proxy_update(self):
        self.engine.pre_bar_proxy_update(inst50=1.0, v750=0.4)
        assert self.engine._current_proxy_b == pytest.approx(0.6)
        assert len(self.engine._proxy_b_history) == 1

    def test_reset_clears_proxy_state(self):
        self.engine._update_proxy(1.0, 0.5)
        self.engine.reset()
        assert self.engine._current_proxy_b == 0.0
        assert self.engine._proxy_b_history == []
class TestAdaptiveBoostEngine:
    """Boost-scale behavior of AdaptiveBoostEngine across its modes."""

    def test_boost_mode_str_fixed(self):
        eng = create_boost_engine(mode='fixed', **_ENG_KW)
        assert eng.boost_mode_str == 'fixed'

    def test_boost_mode_str_winner(self):
        eng = create_boost_engine(mode='adaptive_beta', **_ENG_KW)
        assert eng.boost_mode_str == 'adaptive_beta'

    def test_boost_mode_str_adaptive_both(self):
        eng = create_boost_engine(mode='adaptive_both', **_ENG_KW)
        assert eng.boost_mode_str == 'adaptive_both'

    def test_sizing_scale_mean_starts_at_1(self):
        eng = create_boost_engine(mode='fixed', **_ENG_KW)
        assert eng.sizing_scale_mean == 1.0

    def test_scale_formula_fixed(self):
        """
        scale = 1 + alpha * max(0, threshold - prank)
        With prank=0.1, thr=0.35, alpha=1.0 → scale = 1 + 1.0*(0.35-0.1) = 1.25
        """
        eng = AdaptiveBoostEngine(threshold=0.35, alpha=1.0, **_ENG_KW)
        # Manually set up a known prank:
        # load history so that the current value sits at a low percentile.
        for i in range(9):
            eng._proxy_b_history.append(float(i + 1))
        eng._proxy_b_history.append(0.5)  # current (rank = 0/10 → prank=0)
        eng._current_proxy_b = 0.5
        prank = eng._proxy_prank()
        expected_scale = 1.0 + 1.0 * max(0.0, 0.35 - prank)
        # Just verify formula is self-consistent
        assert expected_scale >= 1.0

    def test_no_scale_when_prank_above_threshold(self):
        """When proxy_B is in top 70% (stressed), scale should be 1.0."""
        eng = AdaptiveBoostEngine(threshold=0.35, alpha=1.0, **_ENG_KW)
        # Set prank > 0.35: push 9 values lower, current is highest
        for i in range(9):
            eng._proxy_b_history.append(0.0)
        eng._proxy_b_history.append(1.0)
        eng._current_proxy_b = 1.0
        prank = eng._proxy_prank()
        assert prank > 0.35
        scale = 1.0 + 1.0 * max(0.0, 0.35 - prank)
        assert scale == pytest.approx(1.0)

    def test_reset_clears_scale_history(self):
        eng = create_boost_engine(mode='fixed', **_ENG_KW)
        eng._scale_history.append(1.2)
        eng._alpha_eff_history.append(1.0)
        eng._thr_eff_history.append(0.35)
        eng.reset()
        assert eng._scale_history == []
        assert eng._alpha_eff_history == []
        assert eng._thr_eff_history == []

    def test_adaptive_beta_alpha_scaled_by_beta(self):
        """alpha_eff = alpha * (1 + day_beta); verify on an isolated _try_entry call."""
        eng = AdaptiveBoostEngine(threshold=1.0, alpha=1.0, adaptive_beta=True, **_ENG_KW)
        # day_beta = 0.5 → alpha_eff = 1.5
        eng._day_beta = 0.5
        # prank = 0 (very calm), threshold = 1.0 → scale = 1 + 1.5 * max(0, 1.0-0) = 2.5
        eng._current_proxy_b = -999.0  # guaranteed prank ≈ 0
        for _ in range(20):
            eng._proxy_b_history.append(0.0)
        # Mirror the engine's effective-alpha computation.
        # (The original version also computed an unused `boost` local from
        # `_day_base_boost`; removed as it played no part in the assertion.)
        beta = max(0.0, getattr(eng, '_day_beta', 0.0))
        alpha_eff = eng.alpha * (1.0 + beta)
        prank = eng._proxy_prank()
        scale = 1.0 + alpha_eff * max(0.0, eng.threshold - prank)
        assert scale == pytest.approx(1.0 + 1.5 * max(0.0, 1.0 - prank))
class TestExtendedLeverageEngine:

    def test_extended_engine_constructs(self):
        engine = ExtendedLeverageEngine(
            extended_soft_cap=8.0, extended_abs_cap=9.0,
            mc_leverage_ref=5.0, **_ENG_KW
        )
        # Caps must propagate to both the engine and its bet sizer
        assert engine._extended_soft_cap == 8.0
        assert engine._extended_abs_cap == 9.0
        assert engine._mc_leverage_ref == 5.0
        assert engine.base_max_leverage == 8.0
        assert engine.abs_max_leverage == 9.0
        assert engine.bet_sizer.max_leverage == 8.0

    def test_extended_engine_defaults(self):
        engine = ExtendedLeverageEngine(**_ENG_KW)
        assert engine._extended_soft_cap == 5.0
        assert engine._extended_abs_cap == 6.0
        assert engine._mc_leverage_ref == 5.0  # defaults to soft_cap

    def test_mc_monitor_initialized(self):
        engine = ExtendedLeverageEngine(**_ENG_KW)
        assert engine.mc_monitor == {'red': 0, 'orange': 0, 'ok': 0, 'halted': 0, 'total': 0}

    def test_extended_reset_restores_caps(self):
        engine = ExtendedLeverageEngine(
            extended_soft_cap=8.0, extended_abs_cap=9.0, **_ENG_KW
        )
        engine.base_max_leverage = 99.0  # corrupt
        engine.reset()
        assert engine.base_max_leverage == 8.0
        assert engine.abs_max_leverage == 9.0
class TestLiquidationGuardEngine:

    def test_liq_engine_constructs(self):
        guard = LiquidationGuardEngine(
            extended_soft_cap=8.0, extended_abs_cap=9.0,
            mc_leverage_ref=5.0, margin_buffer=0.95, **_ENG_KW
        )
        assert guard.margin_buffer == 0.95
        assert guard.liquidation_stops == 0
        # Stop floor = (1 / abs_cap) * margin_buffer
        expected_floor = (1.0 / 9.0) * 0.95
        assert abs(guard._liq_stop_pct - expected_floor) < 1e-9

    def test_create_d_liq_engine(self):
        guard = create_d_liq_engine(**_ENG_KW)
        assert isinstance(guard, LiquidationGuardEngine)
        assert guard._extended_soft_cap == D_LIQ_SOFT_CAP
        assert guard._extended_abs_cap == D_LIQ_ABS_CAP
        assert guard._mc_leverage_ref == D_LIQ_MC_REF
        assert guard.margin_buffer == D_LIQ_MARGIN_BUF
        assert guard.adaptive_beta is True

    def test_liq_floor_pct_is_conservative(self):
        """Floor must be < 1/abs_cap (exit before exchange)."""
        guard = create_d_liq_engine(**_ENG_KW)
        assert guard._liq_stop_pct < 1.0 / D_LIQ_ABS_CAP

    def test_liq_reset_recalculates_floor(self):
        guard = create_d_liq_engine(**_ENG_KW)
        guard.liquidation_stops = 99
        guard.reset()
        assert guard.liquidation_stops == 0
        expected_floor = (1.0 / D_LIQ_ABS_CAP) * D_LIQ_MARGIN_BUF
        assert abs(guard._liq_stop_pct - expected_floor) < 1e-9
class TestDolphinActorImport:
    """Verify dolphin_actor can be imported and has the right engine class."""

    def test_actor_imports_create_boost_engine(self):
        # The module must import cleanly and expose create_boost_engine
        import importlib
        actor_module = importlib.import_module('nautilus_dolphin.nautilus.dolphin_actor')
        assert hasattr(actor_module, 'create_boost_engine')

    def test_default_boost_mode_constant(self):
        assert DEFAULT_BOOST_MODE == 'd_liq'

    def test_d_liq_reachable_via_factory(self):
        """Verify DolphinActor's factory path reaches LiquidationGuardEngine for d_liq config."""
        engine = create_boost_engine(mode='d_liq', **_ENG_KW)
        assert isinstance(engine, LiquidationGuardEngine)
        assert isinstance(engine, AdaptiveBoostEngine)  # inheritance chain intact
# ═══════════════════════════════════════════════════════════════════════════════
# E2E regression tests (slow — require vbt_cache data, ~25 min)
# ═══════════════════════════════════════════════════════════════════════════════
# NOTE(review): `pytestmark_slow` is never referenced — the slow tests apply
# @pytest.mark.slow directly. (pytest's convention for a module-wide mark is
# a variable named exactly `pytestmark`.) Kept as-is for compatibility.
pytestmark_slow = pytest.mark.slow
# Known-good results from exp6/exp7/exp8 (gold dataset, seed=42)
_GOLD = dict(roi=88.55, pf=1.215, dd=15.05, trades=2155)
# Per-mode expected ROI % / max-drawdown % from the exp8 sweep
_EXP8 = {
    'none': dict(roi=88.55, dd=15.05),
    'fixed': dict(roi=93.61, dd=14.51),
    'adaptive_alpha': dict(roi=93.40, dd=14.51),
    'adaptive_thr': dict(roi=94.13, dd=14.51),
    'adaptive_both': dict(roi=94.11, dd=14.51),
    'adaptive_beta': dict(roi=96.55, dd=14.32),
}
# Tolerance for floating-point reproducibility
_TOL_ROI = 0.1  # percentage points
_TOL_DD = 0.05  # percentage points
_TOL_TR = 5  # trade count
def _load_backtest_harness():
    """Import exp_shared and load data helpers.

    Returns:
        Tuple of (ensure_jit, ENGINE_KWARGS, MC_BASE_CFG, load_data,
        load_forewarner, AdaptiveCircuitBreaker).

    Raises:
        ImportError: if the backtest modules/data are not available.
    """
    # Guard against duplicate sys.path entries: every slow test calls this
    # helper, and the previous unconditional insert piled up one copy per call.
    dvae_path = str(_ROOT / 'dvae')
    if dvae_path not in sys.path:
        sys.path.insert(0, dvae_path)
    from exp_shared import (
        ensure_jit, ENGINE_KWARGS, MC_BASE_CFG,
        load_data, load_forewarner,
    )
    from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker
    return ensure_jit, ENGINE_KWARGS, MC_BASE_CFG, load_data, load_forewarner, AdaptiveCircuitBreaker
def _run_engine(eng, d, fw, MC_BASE_CFG):
    """Run full 55-day backtest; return (roi, dd, trades, pf).

    Args:
        eng: boost engine under test (mutated in place).
        d: data bundle as returned by load_data().
        fw: MC forewarner or None (forewarner wiring skipped when None).
        MC_BASE_CFG: forewarner base configuration.
    """
    # Hoisted out of the per-day loop — the original re-imported numpy on
    # every iteration.
    import numpy as _np

    from nautilus_dolphin.nautilus.adaptive_circuit_breaker import AdaptiveCircuitBreaker
    acb = AdaptiveCircuitBreaker()
    acb.preload_w750(d['date_strings'])
    eng.set_ob_engine(d['ob_eng'])
    eng.set_acb(acb)
    if fw is not None:
        eng.set_mc_forewarner(fw, MC_BASE_CFG)
        eng.set_esoteric_hazard_multiplier(0.0)
    daily_caps = []  # end-of-day capital, used for the drawdown calculation
    for pf_file in d['parquet_files']:
        ds = pf_file.stem
        df, acols, dvol = d['pq_data'][ds]
        # Volatility gate: only bars above the p60 daily-vol threshold count
        vol_ok = _np.where(_np.isfinite(dvol), dvol > d['vol_p60'], False)
        eng.process_day(ds, df, acols, vol_regime_ok=vol_ok)
        daily_caps.append(eng.capital)
    tr = eng.trade_history
    n = len(tr)
    roi = (eng.capital - 25000.0) / 25000.0 * 100.0

    def _abs(t):
        # Fallback approximates absolute P&L from pct on a nominal $250 base
        return t.pnl_absolute if hasattr(t, 'pnl_absolute') else t.pnl_pct * 250.0

    wins = [t for t in tr if _abs(t) > 0]
    losses = [t for t in tr if _abs(t) <= 0]
    pf = sum(_abs(t) for t in wins) / max(abs(sum(_abs(t) for t in losses)), 1e-9)
    # Max drawdown over the daily capital curve
    peak, max_dd = 25000.0, 0.0
    for cap in daily_caps:
        peak = max(peak, cap)
        max_dd = max(max_dd, (peak - cap) / peak * 100.0)
    return roi, max_dd, n, pf
@pytest.mark.slow
def test_e2e_baseline_matches_gold():
    """Baseline (mode='none') must reproduce gold metrics."""
    try:
        harness = _load_backtest_harness()
    except Exception as e:
        pytest.skip(f"Backtest data not available: {e}")
    ensure_jit, ENGINE_KWARGS, MC_BASE_CFG, load_data, load_forewarner, _ = harness
    ensure_jit()
    data = load_data()
    forewarner = load_forewarner()
    engine = create_boost_engine(mode='none', **ENGINE_KWARGS)
    roi, dd, trades, _pf = _run_engine(engine, data, forewarner, MC_BASE_CFG)
    assert abs(roi - _GOLD['roi']) < _TOL_ROI, f"ROI {roi:.2f} ≠ gold {_GOLD['roi']}"
    assert abs(dd - _GOLD['dd']) < _TOL_DD, f"DD {dd:.2f} ≠ gold {_GOLD['dd']}"
    assert abs(trades - _GOLD['trades']) < _TOL_TR, f"Trades {trades} ≠ gold {_GOLD['trades']}"
@pytest.mark.slow
@pytest.mark.parametrize("mode", [m for m in BOOST_MODES if m not in ('none', 'd_liq')])
def test_e2e_boost_mode_matches_exp8(mode):
    """Each boost mode must reproduce exp8 results within tolerance."""
    try:
        harness = _load_backtest_harness()
    except Exception as e:
        pytest.skip(f"Backtest data not available: {e}")
    ensure_jit, ENGINE_KWARGS, MC_BASE_CFG, load_data, load_forewarner, _ = harness
    ensure_jit()
    data = load_data()
    forewarner = load_forewarner()
    engine = create_boost_engine(mode=mode, **ENGINE_KWARGS)
    roi, dd, trades, _pf = _run_engine(engine, data, forewarner, MC_BASE_CFG)
    expected = _EXP8[mode]
    assert abs(roi - expected['roi']) < _TOL_ROI, \
        f"[{mode}] ROI {roi:.2f} ≠ exp8 {expected['roi']}"
    assert abs(dd - expected['dd']) < _TOL_DD, \
        f"[{mode}] DD {dd:.2f} ≠ exp8 {expected['dd']}"
    # Trade count must be identical to baseline (no timing change)
    assert abs(trades - _GOLD['trades']) < _TOL_TR, \
        f"[{mode}] Trade count {trades} changed from baseline {_GOLD['trades']} — timing modified!"
@pytest.mark.slow
def test_e2e_winner_beats_baseline():
    """adaptive_beta must beat baseline on both ROI and DD."""
    try:
        harness = _load_backtest_harness()
    except Exception as e:
        pytest.skip(f"Backtest data not available: {e}")
    ensure_jit, ENGINE_KWARGS, MC_BASE_CFG, load_data, load_forewarner, _ = harness
    ensure_jit()
    data = load_data()
    forewarner = load_forewarner()
    # Baseline first, then the winner, on identical inputs
    baseline_roi, baseline_dd, _, _ = _run_engine(
        create_boost_engine(mode='none', **ENGINE_KWARGS), data, forewarner, MC_BASE_CFG)
    winner_roi, winner_dd, winner_trades, _ = _run_engine(
        create_boost_engine(mode='adaptive_beta', **ENGINE_KWARGS), data, forewarner, MC_BASE_CFG)
    assert winner_roi > baseline_roi, \
        f"adaptive_beta ROI {winner_roi:.2f} should exceed baseline {baseline_roi:.2f}"
    assert winner_dd < baseline_dd, \
        f"adaptive_beta DD {winner_dd:.2f} should be less than baseline {baseline_dd:.2f}"
    assert abs(winner_trades - _GOLD['trades']) < _TOL_TR, \
        "Trade count must not change (sizing-only modification)"
# D_LIQ_GOLD reference (exp9b: 8/9x leverage + liquidation guard)
_D_LIQ_GOLD = dict(roi=181.81, dd=17.65, trades=2155)
# Wider tolerances for leverage experiments (larger absolute numbers)
_TOL_ROI_LEV = 0.5  # ROI tolerance, percentage points
_TOL_DD_LEV = 0.2  # max-drawdown tolerance, percentage points
@pytest.mark.slow
def test_e2e_d_liq_gold_reproduces_exp9b():
    """D_LIQ_GOLD config (8/9x + liquidation guard) must reproduce exp9b results."""
    try:
        harness = _load_backtest_harness()
    except Exception as e:
        pytest.skip(f"Backtest data not available: {e}")
    ensure_jit, ENGINE_KWARGS, MC_BASE_CFG, load_data, load_forewarner, _ = harness
    ensure_jit()
    data = load_data()
    forewarner = load_forewarner()
    guard = create_d_liq_engine(**ENGINE_KWARGS)
    roi, dd, trades, _pf = _run_engine(guard, data, forewarner, MC_BASE_CFG)
    assert abs(roi - _D_LIQ_GOLD['roi']) < _TOL_ROI_LEV, \
        f"D_LIQ ROI {roi:.2f} ≠ exp9b {_D_LIQ_GOLD['roi']}"
    assert abs(dd - _D_LIQ_GOLD['dd']) < _TOL_DD_LEV, \
        f"D_LIQ DD {dd:.2f} ≠ exp9b {_D_LIQ_GOLD['dd']}"
    assert abs(trades - _D_LIQ_GOLD['trades']) < _TOL_TR, \
        f"D_LIQ trades {trades} ≠ exp9b {_D_LIQ_GOLD['trades']}"
    # Liquidation guard must not cascade (1 stop expected, rate << 0.1%)
    assert guard.liquidation_stops <= 3, \
        f"D_LIQ liquidation_stops={guard.liquidation_stops} — cascade suspected"
@pytest.mark.slow
def test_e2e_d_liq_mc_stays_silent():
    """MC-Forewarner must fire zero RED days at D_LIQ config (mc_ref=5.0 decoupled)."""
    try:
        harness = _load_backtest_harness()
    except Exception as e:
        pytest.skip(f"Backtest data not available: {e}")
    ensure_jit, ENGINE_KWARGS, MC_BASE_CFG, load_data, load_forewarner, _ = harness
    ensure_jit()
    data = load_data()
    forewarner = load_forewarner()
    guard = create_d_liq_engine(**ENGINE_KWARGS)
    _run_engine(guard, data, forewarner, MC_BASE_CFG)
    assert guard.mc_monitor['red'] == 0, \
        f"MC fired RED on {guard.mc_monitor['red']} days — decoupling may be broken"
    assert guard.mc_monitor['halted'] == 0, \
        f"MC halted {guard.mc_monitor['halted']} days — unexpected"

View File

@@ -0,0 +1,324 @@
"""Redis Integration Tests for SignalBridgeActor.
Uses fakeredis to provide a mock Redis server for testing.
This tests the full signal flow from Redis to Nautilus.
"""
import asyncio
import json
import pytest
from unittest.mock import Mock, patch, PropertyMock
from datetime import datetime
class TestRedisConnection:
    """Test Redis connection and basic operations."""

    def test_fakeredis_available(self):
        """Test that fakeredis is available and working."""
        import fakeredis

        # Create a fake Redis server and a client bound to it
        client = fakeredis.FakeStrictRedis(server=fakeredis.FakeServer())
        # Round-trip a simple key/value pair
        client.set('test_key', 'test_value')
        assert client.get('test_key').decode() == 'test_value'

    def test_fakeredis_streams(self):
        """Test that fakeredis supports Redis Streams."""
        import fakeredis

        client = fakeredis.FakeStrictRedis(server=fakeredis.FakeServer())
        # Publish two entries onto one stream
        client.xadd('test_stream', {'data': 'value1'})
        client.xadd('test_stream', {'data': 'value2'})
        # Read everything back from the beginning
        messages = client.xread({'test_stream': '0'}, count=10)
        assert len(messages) == 1
        _stream_name, entries = messages[0]
        assert len(entries) == 2
class TestSignalBridgeWithRedis:
    """Test SignalBridgeActor with Redis integration."""

    @pytest.fixture
    def redis_setup(self):
        """Set up fake Redis server and SignalBridgeActor.

        Yields (actor, fake_redis, clock). The ``yield`` sits inside the
        ``patch.object`` context, so the actor's mocked ``clock`` property
        stays active for the whole test body; the actor's Redis connection
        is replaced with an in-memory fakeredis client.
        """
        import fakeredis
        from nautilus_dolphin.nautilus.signal_bridge import SignalBridgeActor, SignalBridgeConfig
        from nautilus_trader.common.component import TestClock
        # Create fake Redis server
        server = fakeredis.FakeServer()
        fake_redis = fakeredis.FakeStrictRedis(server=server)
        # Create SignalBridgeActor with mocked Redis
        config = SignalBridgeConfig(
            redis_url='redis://localhost:6379',
            stream_key='dolphin:signals:stream',
            max_signal_age_sec=60
        )
        actor = SignalBridgeActor(config)
        # Mock the clock — TestClock gives the test deterministic control of "now".
        clock = TestClock()
        clock.set_time(int(datetime.now().timestamp() * 1e9))  # nanoseconds since epoch
        with patch.object(type(actor), 'clock', new_callable=PropertyMock) as mock_clock:
            mock_clock.return_value = clock
            # Replace Redis connection with fake
            actor._redis = fake_redis
            yield actor, fake_redis, clock

    @pytest.mark.asyncio
    async def test_signal_bridge_consumes_signal(self, redis_setup):
        """Test that SignalBridgeActor consumes signals from Redis."""
        actor, fake_redis, clock = redis_setup
        # Create a valid signal
        signal = {
            'timestamp': int(clock.timestamp_ns() / 1e9),  # seconds
            'asset': 'BTCUSDT',
            'direction': 'SHORT',
            'vel_div': -0.025,
            'strength': 0.75,
            'irp_alignment': 0.5,
            'direction_confirm': True,
            'lookback_momentum': 0.0001
        }
        # Add signal to Redis stream
        fake_redis.xadd(
            'dolphin:signals:stream',
            {'signal': json.dumps(signal)}
        )
        # Manually call _consume_stream (simulated)
        # Read from stream
        # NOTE(review): this reads the stream directly rather than driving the
        # actor's consume loop — it verifies the fixture wiring, not the actor.
        messages = fake_redis.xread({'dolphin:signals:stream': '0'}, count=10)
        assert len(messages) == 1
        stream_name, entries = messages[0]
        assert len(entries) == 1

    @pytest.mark.asyncio
    async def test_signal_bridge_validates_signal(self, redis_setup):
        """Test signal validation in SignalBridgeActor."""
        actor, fake_redis, clock = redis_setup
        # Create a valid signal
        valid_signal = {
            'timestamp': int(clock.timestamp_ns() / 1e9),
            'asset': 'BTCUSDT',
            'direction': 'SHORT',
            'vel_div': -0.025,
            'strength': 0.75,
            'irp_alignment': 0.5,
            'direction_confirm': True,
            'lookback_momentum': 0.0001
        }
        # Test validation
        is_valid = actor._validate_signal(valid_signal)
        assert is_valid is True
        # Test invalid signal (missing field)
        invalid_signal = {
            'timestamp': int(clock.timestamp_ns() / 1e9),
            'asset': 'BTCUSDT'
        }
        is_valid = actor._validate_signal(invalid_signal)
        assert is_valid is False

    @pytest.mark.asyncio
    async def test_signal_bridge_rejects_stale_signal(self, redis_setup):
        """Test that stale signals are rejected."""
        actor, fake_redis, clock = redis_setup
        # Create a stale signal (70 seconds old, older than max_signal_age_sec=60)
        stale_signal = {
            'timestamp': int((clock.timestamp_ns() / 1e9) - 70),
            'asset': 'BTCUSDT',
            'direction': 'SHORT',
            'vel_div': -0.025,
            'strength': 0.75,
            'irp_alignment': 0.5,
            'direction_confirm': True,
            'lookback_momentum': 0.0001
        }
        is_valid = actor._validate_signal(stale_signal)
        assert is_valid is False
class TestSignalFlow:
    """Test complete signal flow from Redis to strategy."""

    @pytest.mark.asyncio
    async def test_full_signal_flow(self):
        """Test complete signal flow: Redis -> SignalBridge -> Strategy."""
        import fakeredis
        from nautilus_dolphin.nautilus.signal_bridge import SignalBridgeActor, SignalBridgeConfig
        from nautilus_dolphin.nautilus.strategy import DolphinExecutionStrategyForTesting
        from nautilus_trader.common.component import TestClock
        # Set up fake Redis
        server = fakeredis.FakeServer()
        fake_redis = fakeredis.FakeStrictRedis(server=server)
        # Create SignalBridgeActor
        bridge_config = SignalBridgeConfig(
            redis_url='redis://localhost:6379',
            stream_key='dolphin:signals:stream',
            max_signal_age_sec=60
        )
        bridge_actor = SignalBridgeActor(bridge_config)
        bridge_actor._redis = fake_redis  # in-memory fake replaces the real connection
        # Create strategy
        strategy_config = {
            'venue': 'BINANCE_FUTURES',
            'acb_enabled': True,
            'max_leverage': 5.0
        }
        strategy = DolphinExecutionStrategyForTesting(strategy_config)
        # Set up clock
        clock = TestClock()
        clock.set_time(int(datetime.now().timestamp() * 1e9))  # ns since epoch
        with patch.object(type(bridge_actor), 'clock', new_callable=PropertyMock) as mock_clock:
            mock_clock.return_value = clock
            # Create and publish signal to Redis
            signal = {
                'timestamp': int(clock.timestamp_ns() / 1e9),
                'asset': 'BTCUSDT',
                'direction': 'SHORT',
                'vel_div': -0.025,
                'strength': 0.75,
                'irp_alignment': 0.5,
                'direction_confirm': True,
                'lookback_momentum': 0.0001,
                'price': 50000.0
            }
            fake_redis.xadd(
                'dolphin:signals:stream',
                {'signal': json.dumps(signal)}
            )
            # Verify signal is in Redis
            messages = fake_redis.xread({'dolphin:signals:stream': '0'}, count=10)
            assert len(messages) == 1
            # Verify strategy would process it (filters)
            can_trade = strategy._should_trade(signal)
            assert can_trade == ""  # Empty string means can trade

    @pytest.mark.asyncio
    async def test_multiple_signals_processing(self):
        """Test processing multiple signals from Redis."""
        import fakeredis
        from nautilus_dolphin.nautilus.signal_bridge import SignalBridgeActor, SignalBridgeConfig
        from nautilus_trader.common.component import TestClock
        # Set up fake Redis
        server = fakeredis.FakeServer()
        fake_redis = fakeredis.FakeStrictRedis(server=server)
        # Create SignalBridgeActor
        bridge_config = SignalBridgeConfig(
            redis_url='redis://localhost:6379',
            stream_key='dolphin:signals:stream',
            max_signal_age_sec=60
        )
        bridge_actor = SignalBridgeActor(bridge_config)
        bridge_actor._redis = fake_redis
        clock = TestClock()
        clock.set_time(int(datetime.now().timestamp() * 1e9))
        with patch.object(type(bridge_actor), 'clock', new_callable=PropertyMock) as mock_clock:
            mock_clock.return_value = clock
            # Publish multiple signals
            assets = ['BTCUSDT', 'ETHUSDT', 'ADAUSDT']
            for i, asset in enumerate(assets):
                signal = {
                    'timestamp': int(clock.timestamp_ns() / 1e9),
                    'asset': asset,
                    'direction': 'SHORT',
                    'vel_div': -0.025 - (i * 0.01),  # distinct divergence per asset
                    'strength': 0.75,
                    'irp_alignment': 0.5,
                    'direction_confirm': True,
                    'lookback_momentum': 0.0001,
                    'price': 50000.0 - (i * 1000)
                }
                fake_redis.xadd(
                    'dolphin:signals:stream',
                    {'signal': json.dumps(signal)}
                )
            # Verify all signals are in Redis
            messages = fake_redis.xread({'dolphin:signals:stream': '0'}, count=10)
            assert len(messages) == 1
            stream_name, entries = messages[0]
            assert len(entries) == 3
class TestRedisConnectionConfig:
    """Test Redis connection configuration."""

    def test_signal_bridge_config_defaults(self):
        """Test SignalBridgeConfig default values."""
        from nautilus_dolphin.nautilus.signal_bridge import SignalBridgeConfig

        defaults = SignalBridgeConfig()
        assert defaults.redis_url == "redis://localhost:6379"
        assert defaults.stream_key == "dolphin:signals:stream"
        assert defaults.max_signal_age_sec == 10

    def test_signal_bridge_config_custom(self):
        """Test SignalBridgeConfig custom values."""
        from nautilus_dolphin.nautilus.signal_bridge import SignalBridgeConfig

        custom = SignalBridgeConfig(
            redis_url='redis://custom:6380',
            stream_key='custom:stream',
            max_signal_age_sec=30
        )
        assert custom.redis_url == "redis://custom:6380"
        assert custom.stream_key == "custom:stream"
        assert custom.max_signal_age_sec == 30

    def test_config_yaml_matches(self):
        """Test that config.yaml matches SignalBridgeConfig."""
        import yaml
        from nautilus_dolphin.nautilus.signal_bridge import SignalBridgeConfig

        # NOTE(review): path is relative to the process CWD — run pytest from
        # the directory containing config/ or this raises FileNotFoundError.
        with open('config/config.yaml', 'r') as f:
            loaded = yaml.safe_load(f)
        section = loaded.get('signal_bridge', {})
        # Every key SignalBridgeConfig consumes must be present in the file.
        for key in ('redis_url', 'stream_key', 'max_signal_age_sec'):
            assert key in section

View File

@@ -0,0 +1,533 @@
"""
test_rt_exit_manager.py — Unit and integration tests for RealTimeExitManager.
Tests cover:
- No positions → empty result
- SHORT TP trigger (price falls through tp_price)
- SHORT no trigger (price above tp_price)
- SHORT SL trigger (price rises through sl_price)
- LONG TP and SL triggers
- SL disabled (sl_price=0.0) — never fires
- Max-hold trigger (time-based, no price lookup)
- Max-hold disabled
- Multiple positions, only one triggers
- Register / unregister lifecycle
- Exact fill-side convention (SHORT exit uses ask, LONG uses bid)
- None price → no trigger (feed unavailable)
- make_position threshold arithmetic
- Immutability of OpenPosition / ExitSignal
"""
import sys
import time
import unittest
sys.path.insert(0, 'nautilus_dolphin')
from nautilus_dolphin.nautilus.live_price_feed import ConstantPriceFeed, NullPriceFeed
from nautilus_dolphin.nautilus.rt_exit_manager import (
ExitSignal,
OpenPosition,
RealTimeExitManager,
)
# ── Helpers ───────────────────────────────────────────────────────────────────
def _make_mgr(prices: dict | None = None, tp_pct: float = 0.0095, sl_pct: float = 0.0):
    """Build a (RealTimeExitManager, ConstantPriceFeed) pair for tests."""
    feed = ConstantPriceFeed(prices or {})
    manager = RealTimeExitManager(feed, tp_pct=tp_pct, sl_pct=sl_pct)
    return manager, feed
def _pos(
    trade_id='T1',
    asset='BTCUSDT',
    direction=-1,
    entry_price=82000.0,
    tp_price=81221.0,
    sl_price=0.0,
    entry_ns=None,
    max_hold_ns=0,
):
    """Construct an OpenPosition with sensible SHORT-BTC defaults."""
    # Default the entry timestamp to "now" on the monotonic clock — the same
    # clock the manager uses for hold-time arithmetic.
    if entry_ns is None:
        entry_ns = time.monotonic_ns()
    return OpenPosition(
        trade_id=trade_id,
        asset=asset,
        direction=direction,
        entry_price=entry_price,
        tp_price=tp_price,
        sl_price=sl_price,
        entry_ns=entry_ns,
        max_hold_ns=max_hold_ns,
    )
# ── Basic lifecycle ───────────────────────────────────────────────────────────
class TestLifecycle(unittest.TestCase):
    """Register / unregister bookkeeping on RealTimeExitManager."""

    def test_no_positions_returns_empty_list(self):
        manager, _ = _make_mgr()
        fired = manager.check_all()
        self.assertIsInstance(fired, list)
        self.assertEqual(fired, [])

    def test_open_count_starts_zero(self):
        manager, _ = _make_mgr()
        self.assertEqual(manager.open_count(), 0)

    def test_register_increments_count(self):
        manager, _ = _make_mgr({'BTCUSDT': (82000.0, 82010.0)})
        manager.register(_pos())
        self.assertEqual(manager.open_count(), 1)

    def test_unregister_decrements_count(self):
        manager, _ = _make_mgr()
        manager.register(_pos('T1'))
        manager.unregister('T1')
        self.assertEqual(manager.open_count(), 0)

    def test_unregister_unknown_is_noop(self):
        manager, _ = _make_mgr()
        manager.unregister('NONEXISTENT')  # must not raise
        self.assertEqual(manager.open_count(), 0)

    def test_is_registered_true_after_register(self):
        manager, _ = _make_mgr()
        manager.register(_pos('T42'))
        self.assertTrue(manager.is_registered('T42'))

    def test_is_registered_false_after_unregister(self):
        manager, _ = _make_mgr()
        manager.register(_pos('T42'))
        manager.unregister('T42')
        self.assertFalse(manager.is_registered('T42'))
# ── SHORT TP ──────────────────────────────────────────────────────────────────
class TestShortTP(unittest.TestCase):
    """SHORT take-profit: fires when the ask crosses down through tp_price."""

    def test_short_tp_fires_when_ask_below_tp_price(self):
        # SHORT entry 82000, tp_pct=0.0095 → tp_price = 82000*(1-0.0095) = 81221
        manager, feed = _make_mgr(tp_pct=0.0095, sl_pct=0.0)
        feed.update('BTCUSDT', bid=81200.0, ask=81210.0)  # ask 81210 < tp 81221 → TP
        manager.register(
            RealTimeExitManager.make_position('T1', 'BTCUSDT', -1, 82000.0, tp_pct=0.0095)
        )
        fired = manager.check_all()
        self.assertEqual(len(fired), 1)
        (signal,) = fired
        self.assertEqual(signal.trade_id, 'T1')
        self.assertEqual(signal.asset, 'BTCUSDT')
        self.assertEqual(signal.reason, 'RT_TP')
        self.assertAlmostEqual(signal.exit_price, 81210.0)  # ask price used for short exit

    def test_short_tp_does_not_fire_when_ask_above_tp_price(self):
        manager, feed = _make_mgr(tp_pct=0.0095)
        feed.update('BTCUSDT', bid=81900.0, ask=81910.0)  # ask 81910 > tp 81221 → no signal
        manager.register(
            RealTimeExitManager.make_position('T1', 'BTCUSDT', -1, 82000.0, tp_pct=0.0095)
        )
        self.assertEqual(manager.check_all(), [])

    def test_short_tp_fires_at_exact_tp_price(self):
        threshold = 82000.0 * (1.0 - 0.0095)  # = 81221.0
        manager, feed = _make_mgr(tp_pct=0.0095)
        feed.update('BTCUSDT', bid=threshold - 0.01, ask=threshold)  # ask == tp_price exactly
        manager.register(
            RealTimeExitManager.make_position('T1', 'BTCUSDT', -1, 82000.0, tp_pct=0.0095)
        )
        hits = manager.check_all()
        self.assertEqual(len(hits), 1)
        self.assertEqual(hits[0].reason, 'RT_TP')
# ── SHORT SL ──────────────────────────────────────────────────────────────────
class TestShortSL(unittest.TestCase):
    """SHORT stop-loss: fires when the ask crosses up through sl_price."""

    def test_short_sl_fires_when_ask_above_sl_price(self):
        # Entry 82000, sl_pct=0.01 → sl_price = 82000*1.01 = 82820
        manager, feed = _make_mgr(tp_pct=0.0095, sl_pct=0.01)
        feed.update('BTCUSDT', bid=82900.0, ask=82920.0)  # ask 82920 > sl 82820 → SL
        manager.register(
            RealTimeExitManager.make_position('T1', 'BTCUSDT', -1, 82000.0,
                                              tp_pct=0.0095, sl_pct=0.01)
        )
        fired = manager.check_all()
        self.assertEqual(len(fired), 1)
        self.assertEqual(fired[0].reason, 'RT_SL')
        self.assertAlmostEqual(fired[0].exit_price, 82920.0)

    def test_short_sl_disabled_at_zero(self):
        # sl_pct=0.0 → sl_price=0.0 → never fires regardless of price
        manager, feed = _make_mgr(tp_pct=0.0095, sl_pct=0.0)
        feed.update('BTCUSDT', bid=999999.0, ask=1000000.0)  # absurd price
        manager.register(
            RealTimeExitManager.make_position('T1', 'BTCUSDT', -1, 82000.0,
                                              tp_pct=0.0095, sl_pct=0.0)
        )
        self.assertEqual(manager.check_all(), [])

    def test_short_sl_does_not_fire_below_sl_price(self):
        manager, feed = _make_mgr(tp_pct=0.0095, sl_pct=0.01)
        feed.update('BTCUSDT', bid=82700.0, ask=82710.0)  # ask 82710 < sl 82820 → no signal
        manager.register(
            RealTimeExitManager.make_position('T1', 'BTCUSDT', -1, 82000.0,
                                              tp_pct=0.0095, sl_pct=0.01)
        )
        self.assertEqual(manager.check_all(), [])
# ── LONG TP / SL ──────────────────────────────────────────────────────────────
class TestLongTPSL(unittest.TestCase):
    """LONG exits: TP when the bid rises through tp_price, SL when it falls through sl_price."""

    def test_long_tp_fires_when_bid_above_tp_price(self):
        # LONG entry 1600, tp_pct=0.0095 → tp_price = 1600*1.0095 = 1615.2
        manager, feed = _make_mgr(tp_pct=0.0095)
        feed.update('ETHUSDT', bid=1616.0, ask=1616.5)  # bid 1616 > tp 1615.2 → TP
        manager.register(
            RealTimeExitManager.make_position('T2', 'ETHUSDT', +1, 1600.0, tp_pct=0.0095)
        )
        fired = manager.check_all()
        self.assertEqual(len(fired), 1)
        self.assertEqual(fired[0].reason, 'RT_TP')
        self.assertAlmostEqual(fired[0].exit_price, 1616.0)  # bid for LONG exit

    def test_long_tp_does_not_fire_when_bid_below_tp_price(self):
        manager, feed = _make_mgr(tp_pct=0.0095)
        feed.update('ETHUSDT', bid=1610.0, ask=1610.5)  # bid 1610 < tp 1615.2 → no signal
        manager.register(
            RealTimeExitManager.make_position('T2', 'ETHUSDT', +1, 1600.0, tp_pct=0.0095)
        )
        self.assertEqual(manager.check_all(), [])

    def test_long_sl_fires_when_bid_below_sl_price(self):
        # LONG entry 1600, sl_pct=0.01 → sl_price = 1600*0.99 = 1584
        manager, feed = _make_mgr(tp_pct=0.0095, sl_pct=0.01)
        feed.update('ETHUSDT', bid=1583.0, ask=1583.5)  # bid 1583 < sl 1584 → SL
        manager.register(
            RealTimeExitManager.make_position('T2', 'ETHUSDT', +1, 1600.0,
                                              tp_pct=0.0095, sl_pct=0.01)
        )
        fired = manager.check_all()
        self.assertEqual(len(fired), 1)
        self.assertEqual(fired[0].reason, 'RT_SL')
# ── Max-hold ──────────────────────────────────────────────────────────────────
class TestMaxHold(unittest.TestCase):
    """Time-based exits: RT_MAX_HOLD fires once (now - entry_ns) exceeds max_hold_ns,
    independently of whether any price threshold was crossed."""

    def test_max_hold_fires_when_elapsed(self):
        """Hold limit already exceeded → RT_MAX_HOLD even though TP was not crossed."""
        mgr, feed = _make_mgr()
        # entry_ns far in the past — hold limit already exceeded
        old_ns = time.monotonic_ns() - int(300 * 1e9)  # 5 minutes ago
        pos = OpenPosition(
            trade_id='T_MAXHOLD',
            asset='BTCUSDT',
            direction=-1,
            entry_price=82000.0,
            tp_price=81000.0,  # price not crossed
            sl_price=0.0,
            entry_ns=old_ns,
            max_hold_ns=int(60 * 1e9),  # 60s limit — clearly exceeded
        )
        feed.update('BTCUSDT', bid=81900.0, ask=81910.0)  # price not at TP
        mgr.register(pos)
        signals = mgr.check_all()
        self.assertEqual(len(signals), 1)
        self.assertEqual(signals[0].reason, 'RT_MAX_HOLD')

    def test_max_hold_not_fired_before_limit(self):
        """A fresh position with a 1-hour limit must stay silent."""
        mgr, feed = _make_mgr()
        pos = OpenPosition(
            trade_id='T_FRESH',
            asset='BTCUSDT',
            direction=-1,
            entry_price=82000.0,
            tp_price=81000.0,
            sl_price=0.0,
            entry_ns=time.monotonic_ns(),  # just now
            max_hold_ns=int(3600 * 1e9),  # 1 hour limit
        )
        feed.update('BTCUSDT', bid=81900.0, ask=81910.0)
        mgr.register(pos)
        self.assertEqual(mgr.check_all(), [])

    def test_max_hold_disabled_at_zero(self):
        """max_hold_ns=0 disables the time-based exit entirely."""
        mgr, feed = _make_mgr()
        old_ns = time.monotonic_ns() - int(86400 * 1e9)  # 1 day ago
        pos = OpenPosition(
            trade_id='T_NOLIMIT',
            asset='BTCUSDT',
            direction=-1,
            entry_price=82000.0,
            tp_price=81000.0,
            sl_price=0.0,
            entry_ns=old_ns,
            max_hold_ns=0,  # disabled
        )
        feed.update('BTCUSDT', bid=81900.0, ask=81910.0)
        mgr.register(pos)
        self.assertEqual(mgr.check_all(), [])  # no TP cross, no max_hold → silent

    def test_max_hold_uses_live_price_when_available(self):
        """When a quote exists, the max-hold exit reports the live fill-side price."""
        mgr, feed = _make_mgr()
        old_ns = time.monotonic_ns() - int(300 * 1e9)
        pos = OpenPosition(
            trade_id='T_MH_PX',
            asset='BTCUSDT',
            direction=-1,
            entry_price=82000.0,
            tp_price=80000.0,
            sl_price=0.0,
            entry_ns=old_ns,
            max_hold_ns=int(60 * 1e9),
        )
        feed.update('BTCUSDT', bid=81500.0, ask=81510.0)
        mgr.register(pos)
        signals = mgr.check_all()
        self.assertEqual(signals[0].reason, 'RT_MAX_HOLD')
        # SHORT max-hold uses ask price
        self.assertAlmostEqual(signals[0].exit_price, 81510.0)

    def test_max_hold_fallback_to_entry_price_when_no_quote(self):
        """With no quote available, the reported exit price falls back to entry_price."""
        mgr, _ = _make_mgr(prices={})  # no prices
        old_ns = time.monotonic_ns() - int(300 * 1e9)
        pos = OpenPosition(
            trade_id='T_MH_NOPX',
            asset='BTCUSDT',
            direction=-1,
            entry_price=82000.0,
            tp_price=80000.0,
            sl_price=0.0,
            entry_ns=old_ns,
            max_hold_ns=int(60 * 1e9),
        )
        mgr.register(pos)
        signals = mgr.check_all()
        self.assertEqual(signals[0].reason, 'RT_MAX_HOLD')
        self.assertAlmostEqual(signals[0].exit_price, 82000.0)  # fallback to entry
# ── None price (feed unavailable) ─────────────────────────────────────────────
class TestNullFeed(unittest.TestCase):
    """A feed that never returns a quote must never trigger price-based exits."""

    def test_null_feed_no_signal_for_price_based_exits(self):
        manager = RealTimeExitManager(NullPriceFeed(), tp_pct=0.0095, sl_pct=0.01)
        manager.register(
            RealTimeExitManager.make_position('T1', 'BTCUSDT', -1, 82000.0,
                                              tp_pct=0.0095, sl_pct=0.01)
        )
        self.assertEqual(manager.check_all(), [])
# ── Multiple positions ────────────────────────────────────────────────────────
class TestMultiplePositions(unittest.TestCase):
    """check_all semantics across several registered positions."""

    def test_only_triggered_position_in_result(self):
        manager, feed = _make_mgr(tp_pct=0.0095)
        # BTC: price crossed TP — entry 82000, tp=81221, ask=81210 (< tp) → fires
        feed.update('BTCUSDT', bid=81200.0, ask=81210.0)
        # ETH: price NOT at TP — entry 1620, tp=1604.61, ask=1610 (> tp) → no fire
        feed.update('ETHUSDT', bid=1609.0, ask=1610.0)
        manager.register(RealTimeExitManager.make_position('T_BTC', 'BTCUSDT', -1, 82000.0, tp_pct=0.0095))
        manager.register(RealTimeExitManager.make_position('T_ETH', 'ETHUSDT', -1, 1620.0, tp_pct=0.0095))
        fired = manager.check_all()
        self.assertEqual(len(fired), 1)
        self.assertEqual(fired[0].trade_id, 'T_BTC')

    def test_both_trigger_returns_two_signals(self):
        manager, feed = _make_mgr(tp_pct=0.0095)
        feed.update('BTCUSDT', bid=81200.0, ask=81210.0)
        feed.update('ETHUSDT', bid=1595.0, ask=1595.5)  # below tp(1604.6)
        manager.register(RealTimeExitManager.make_position('T_BTC', 'BTCUSDT', -1, 82000.0, tp_pct=0.0095))
        manager.register(RealTimeExitManager.make_position('T_ETH', 'ETHUSDT', -1, 1620.0, tp_pct=0.0095))
        self.assertEqual(len(manager.check_all()), 2)

    def test_check_all_does_not_auto_unregister(self):
        """Caller is responsible for unregistering — check_all is read-only."""
        manager, feed = _make_mgr(tp_pct=0.0095)
        feed.update('BTCUSDT', bid=81200.0, ask=81210.0)
        manager.register(RealTimeExitManager.make_position('T1', 'BTCUSDT', -1, 82000.0, tp_pct=0.0095))
        manager.check_all()
        # Position still registered — caller must unregister
        self.assertTrue(manager.is_registered('T1'))

    def test_unregister_after_signal_stops_repeat_firing(self):
        manager, feed = _make_mgr(tp_pct=0.0095)
        feed.update('BTCUSDT', bid=81200.0, ask=81210.0)
        manager.register(RealTimeExitManager.make_position('T1', 'BTCUSDT', -1, 82000.0, tp_pct=0.0095))
        self.assertEqual(len(manager.check_all()), 1)
        manager.unregister('T1')
        self.assertEqual(manager.check_all(), [])
# ── Fill-side convention ──────────────────────────────────────────────────────
class TestFillSide(unittest.TestCase):
    """Exit price must be ask for SHORT (cost to close) and bid for LONG (proceeds)."""

    def test_short_exit_price_is_ask(self):
        manager, feed = _make_mgr(tp_pct=0.0095)
        feed.update('BTCUSDT', bid=81210.0, ask=81220.0)  # ask(81220) <= tp(81221)
        manager.register(
            RealTimeExitManager.make_position('T1', 'BTCUSDT', -1, 82000.0, tp_pct=0.0095)
        )
        fired = manager.check_all()
        self.assertEqual(len(fired), 1)
        self.assertAlmostEqual(fired[0].exit_price, 81220.0)  # ask, not bid

    def test_long_exit_price_is_bid(self):
        manager, feed = _make_mgr(tp_pct=0.0095)
        feed.update('ETHUSDT', bid=1616.0, ask=1616.5)  # bid(1616) >= tp(1615.2)
        manager.register(
            RealTimeExitManager.make_position('T2', 'ETHUSDT', +1, 1600.0, tp_pct=0.0095)
        )
        fired = manager.check_all()
        self.assertEqual(len(fired), 1)
        self.assertAlmostEqual(fired[0].exit_price, 1616.0)  # bid, not ask
# ── make_position factory ─────────────────────────────────────────────────────
class TestMakePosition(unittest.TestCase):
    """Threshold arithmetic and immutability guarantees of the make_position factory."""

    def test_short_tp_price_is_below_entry(self):
        position = RealTimeExitManager.make_position('T', 'BTC', -1, 82000.0, tp_pct=0.0095)
        self.assertAlmostEqual(position.tp_price, 82000.0 * (1.0 - 0.0095), places=4)

    def test_short_tp_price_less_than_entry(self):
        position = RealTimeExitManager.make_position('T', 'BTC', -1, 82000.0, tp_pct=0.0095)
        self.assertLess(position.tp_price, position.entry_price)

    def test_long_tp_price_is_above_entry(self):
        position = RealTimeExitManager.make_position('T', 'ETH', +1, 1600.0, tp_pct=0.0095)
        self.assertAlmostEqual(position.tp_price, 1600.0 * (1.0 + 0.0095), places=4)

    def test_long_tp_price_greater_than_entry(self):
        position = RealTimeExitManager.make_position('T', 'ETH', +1, 1600.0, tp_pct=0.0095)
        self.assertGreater(position.tp_price, position.entry_price)

    def test_short_sl_price_is_above_entry(self):
        position = RealTimeExitManager.make_position('T', 'BTC', -1, 82000.0,
                                                     tp_pct=0.0095, sl_pct=0.01)
        self.assertAlmostEqual(position.sl_price, 82000.0 * 1.01, places=4)

    def test_long_sl_price_is_below_entry(self):
        position = RealTimeExitManager.make_position('T', 'ETH', +1, 1600.0,
                                                     tp_pct=0.0095, sl_pct=0.01)
        self.assertAlmostEqual(position.sl_price, 1600.0 * 0.99, places=4)

    def test_sl_disabled_when_sl_pct_zero(self):
        position = RealTimeExitManager.make_position('T', 'BTC', -1, 82000.0,
                                                     tp_pct=0.0095, sl_pct=0.0)
        self.assertAlmostEqual(position.sl_price, 0.0)

    def test_max_hold_ns_computed_correctly(self):
        position = RealTimeExitManager.make_position('T', 'BTC', -1, 82000.0,
                                                     tp_pct=0.0095,
                                                     max_hold_bars=250,
                                                     bar_seconds=60)
        self.assertEqual(position.max_hold_ns, 250 * 60 * 1_000_000_000)

    def test_max_hold_disabled_when_zero_bars(self):
        position = RealTimeExitManager.make_position('T', 'BTC', -1, 82000.0,
                                                     tp_pct=0.0095, max_hold_bars=0)
        self.assertEqual(position.max_hold_ns, 0)

    def test_open_position_is_frozen(self):
        position = RealTimeExitManager.make_position('T', 'BTC', -1, 82000.0, tp_pct=0.0095)
        with self.assertRaises((AttributeError, TypeError)):
            position.trade_id = 'MUTATED'  # type: ignore

    def test_exit_signal_is_frozen(self):
        signal = ExitSignal('T', 'BTC', 'RT_TP', 81200.0, 123456)
        with self.assertRaises((AttributeError, TypeError)):
            signal.reason = 'MUTATED'  # type: ignore
# ── E2E: full lifecycle ───────────────────────────────────────────────────────
class TestEndToEnd(unittest.TestCase):
    """Simulate a full trade cycle: entry → mid-trade price movement → RT exit."""

    def test_short_trade_full_cycle(self):
        """
        Entry SHORT BTCUSDT @ 82000.
        TP = 81221 (0.95% below entry).
        Price moves down to 81100 (below TP) → RT_TP fires.
        Unregister → subsequent check returns empty.
        """
        feed = ConstantPriceFeed({'BTCUSDT': (82000.0, 82010.0)})
        mgr = RealTimeExitManager(feed, tp_pct=0.0095)
        pos = RealTimeExitManager.make_position(
            'T_CYCLE', 'BTCUSDT', direction=-1,
            entry_price=82000.0, tp_pct=0.0095,
        )
        mgr.register(pos)
        # Price not yet at TP
        self.assertEqual(mgr.check_all(), [])
        self.assertEqual(mgr.open_count(), 1)
        # Price moves to TP
        feed.update('BTCUSDT', bid=81100.0, ask=81110.0)
        signals = mgr.check_all()
        self.assertEqual(len(signals), 1)
        self.assertEqual(signals[0].reason, 'RT_TP')
        self.assertAlmostEqual(signals[0].exit_price, 81110.0)  # ask side closes a SHORT
        # Caller unregisters after handling signal
        mgr.unregister('T_CYCLE')
        self.assertEqual(mgr.open_count(), 0)
        self.assertEqual(mgr.check_all(), [])

    def test_multiple_symbols_independent_lifecycle(self):
        """Three independent SHORTs; TPs fire and are retired one at a time."""
        feed = ConstantPriceFeed({
            'BTCUSDT': (82000.0, 82010.0),
            'ETHUSDT': (1600.0, 1600.5),
            'SOLUSDT': (120.0, 120.05),
        })
        mgr = RealTimeExitManager(feed, tp_pct=0.0095)
        for tid, sym, ep in [
            ('T1', 'BTCUSDT', 82000.0),
            ('T2', 'ETHUSDT', 1600.0),
            ('T3', 'SOLUSDT', 120.0),
        ]:
            mgr.register(RealTimeExitManager.make_position(tid, sym, -1, ep, tp_pct=0.0095))
        self.assertEqual(mgr.open_count(), 3)
        self.assertEqual(mgr.check_all(), [])  # no TP crossed yet
        # BTC hits TP
        feed.update('BTCUSDT', bid=81100.0, ask=81110.0)
        signals = mgr.check_all()
        self.assertEqual(len(signals), 1)
        self.assertEqual(signals[0].trade_id, 'T1')
        mgr.unregister('T1')
        self.assertEqual(mgr.open_count(), 2)
        # ETH hits TP
        feed.update('ETHUSDT', bid=1582.0, ask=1582.5)
        signals = mgr.check_all()
        self.assertEqual(len(signals), 1)
        self.assertEqual(signals[0].trade_id, 'T2')
# Allow running this module directly (python test_rt_exit_manager.py)
# via the stdlib runner, in addition to pytest collection.
if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,153 @@
"""Tests for SignalBridgeActor.
Uses TestClock pattern from Nautilus to properly initialize Actor components.
"""
import pytest
from unittest.mock import Mock, patch, PropertyMock
from nautilus_dolphin.nautilus.signal_bridge import SignalBridgeActor, SignalBridgeConfig
class TestSignalBridgeTimestampParsing:
    """Test timestamp parsing without full Nautilus initialization."""

    @staticmethod
    def _actor():
        # Parsing needs no Redis/Nautilus wiring — a bare actor suffices.
        return SignalBridgeActor(SignalBridgeConfig(redis_url='redis://localhost'))

    def test_parse_timestamp_ns_from_seconds(self):
        """Test timestamp conversion from seconds."""
        assert self._actor()._parse_timestamp_ns(1234567890) == 1234567890 * 1_000_000_000

    def test_parse_timestamp_ns_from_milliseconds(self):
        """Test timestamp conversion from milliseconds."""
        assert self._actor()._parse_timestamp_ns(1234567890123) == 1234567890123 * 1_000

    def test_parse_timestamp_ns_from_nanoseconds(self):
        """Test timestamp already in nanoseconds."""
        ts_ns = 1234567890123456789
        assert self._actor()._parse_timestamp_ns(ts_ns) == ts_ns
class TestSignalBridgeWithNautilus:
    """Test SignalBridgeActor with proper Nautilus initialization."""

    @pytest.fixture
    def nautilus_actor(self):
        """Create a properly initialized SignalBridgeActor with mocked clock.

        Yields (actor, clock). The ``yield`` is inside the ``with`` block, so
        the patched ``clock`` property remains active for the test body; each
        test then positions the TestClock relative to the signal timestamp.
        """
        from nautilus_trader.common.component import TestClock
        config = SignalBridgeConfig(
            redis_url='redis://localhost',
            max_signal_age_sec=10
        )
        actor = SignalBridgeActor(config)
        # Create TestClock and patch the clock property
        clock = TestClock()
        # Use patch to mock the clock property
        with patch.object(
            type(actor),
            'clock',
            new_callable=PropertyMock
        ) as mock_clock:
            mock_clock.return_value = clock
            yield actor, clock

    def test_validate_signal_missing_fields(self, nautilus_actor):
        """Test signal validation rejects missing fields."""
        actor, clock = nautilus_actor
        # Only two of the required fields present → must be rejected.
        signal = {'timestamp': 1234567890, 'asset': 'BTCUSDT'}
        assert not actor._validate_signal(signal)

    def test_validate_signal_valid(self, nautilus_actor):
        """Test signal validation accepts valid signal."""
        actor, clock = nautilus_actor
        # Set clock to be after signal timestamp (+5s: within max_signal_age_sec=10)
        clock.set_time(1234567890 * 1_000_000_000 + 5_000_000_000)
        signal = {
            'timestamp': 1234567890,
            'asset': 'BTCUSDT',
            'direction': 'SHORT',
            'vel_div': -0.025,
            'strength': 0.75
        }
        assert actor._validate_signal(signal)

    def test_validate_signal_stale(self, nautilus_actor):
        """Test signal validation rejects stale signals."""
        actor, clock = nautilus_actor
        # Set clock to be much later than signal (older than max_signal_age_sec)
        clock.set_time(1234567890 * 1_000_000_000 + 20_000_000_000)  # 20s later
        signal = {
            'timestamp': 1234567890,
            'asset': 'BTCUSDT',
            'direction': 'SHORT',
            'vel_div': -0.025,
            'strength': 0.75
        }
        assert not actor._validate_signal(signal)

    def test_validate_signal_future(self, nautilus_actor):
        """Test signal validation rejects future signals."""
        actor, clock = nautilus_actor
        # Set clock to be before signal timestamp (signal appears 1s in the future)
        clock.set_time(1234567890 * 1_000_000_000 - 1_000_000_000)
        signal = {
            'timestamp': 1234567890,
            'asset': 'BTCUSDT',
            'direction': 'SHORT',
            'vel_div': -0.025,
            'strength': 0.75
        }
        assert not actor._validate_signal(signal)
class TestSignalBridgeConfig:
    """Test SignalBridgeConfig initialization."""

    def test_default_config(self):
        """Test default configuration values."""
        defaults = SignalBridgeConfig()
        assert defaults.redis_url == "redis://localhost:6379"
        assert defaults.stream_key == "dolphin:signals:stream"
        assert defaults.max_signal_age_sec == 10

    def test_custom_config(self):
        """Test custom configuration values."""
        custom = SignalBridgeConfig(
            redis_url="redis://custom:6380",
            stream_key="custom:stream",
            max_signal_age_sec=30
        )
        assert custom.redis_url == "redis://custom:6380"
        assert custom.stream_key == "custom:stream"
        assert custom.max_signal_age_sec == 30

View File

@@ -0,0 +1,179 @@
"""Tests for SmartExecAlgorithm enhancements."""
import pytest
from unittest.mock import Mock, MagicMock
from nautilus_trader.model.enums import OrderSide
from nautilus_trader.model.objects import Price
from nautilus_dolphin.nautilus.smart_exec_algorithm import SmartExecAlgorithmForTesting
class TestSmartExecAlgorithmAbortLogic:
    """Tests for the abort logic when price moves against position."""

    @staticmethod
    def _quote(bid, ask):
        """Build a minimal mock quote tick with the given top-of-book prices."""
        tick = Mock()
        tick.bid_price = Price(bid, 2)
        tick.ask_price = Price(ask, 2)
        return tick

    @staticmethod
    def _order(side):
        """Build a minimal mock order on the given side."""
        order = Mock()
        order.side = side
        return order

    def test_should_abort_entry_buy_price_rises(self):
        """Test BUY entry aborted when ask rises 5bps above limit."""
        algo = SmartExecAlgorithmForTesting({'entry_abort_threshold_bps': 5.0})
        # Ask moved up 1% — well above the 5bps threshold → abort.
        tick = self._quote(bid=50499, ask=50500)
        assert algo._should_abort_entry(self._order(OrderSide.BUY), tick,
                                        {'limit_price': 50000.0}) is True

    def test_should_not_abort_entry_buy_price_stable(self):
        """Test BUY entry not aborted when price hasn't moved much."""
        algo = SmartExecAlgorithmForTesting({'entry_abort_threshold_bps': 5.0})
        # Ask only 2bps higher (below 5bps threshold) → keep working the order.
        tick = self._quote(bid=50009, ask=50010)
        assert algo._should_abort_entry(self._order(OrderSide.BUY), tick,
                                        {'limit_price': 50000.0}) is False

    def test_should_abort_entry_sell_price_drops(self):
        """Test SELL entry aborted when bid drops 5bps below limit."""
        algo = SmartExecAlgorithmForTesting({'entry_abort_threshold_bps': 5.0})
        # Bid moved down 1% — well above the 5bps threshold → abort.
        tick = self._quote(bid=49500, ask=49501)
        assert algo._should_abort_entry(self._order(OrderSide.SELL), tick,
                                        {'limit_price': 50000.0}) is True

    def test_abort_entry_cancels_order(self):
        """Test abort_entry cancels the order and updates metrics."""
        algo = SmartExecAlgorithmForTesting({'entry_abort_threshold_bps': 5.0})
        # Seed one pending entry, then abort it.
        pending_order = Mock()
        pending_order.is_closed = False
        algo._pending_entries = {'order1': {'limit_price': 50000.0}}
        algo._abort_entry('order1', {'limit_price': 50000.0})
        assert algo.cancel_order.called
        assert algo._metrics['entries_aborted'] == 1
        assert 'order1' not in algo._pending_entries
class TestSmartExecAlgorithmFeeTracking:
    """Tests for fee and slippage tracking."""

    def test_fee_calculation_maker(self):
        """Test maker fee calculation (0.02%)."""
        algo = SmartExecAlgorithmForTesting({
            'maker_fee_rate': 0.0002,
            'taker_fee_rate': 0.0005,
        })
        # $50000 notional at 2bps maker rate
        assert algo._calculate_fee(50000.0, 'maker') == 10.0  # 50000 * 0.0002

    def test_fee_calculation_taker(self):
        """Test taker fee calculation (0.05%)."""
        algo = SmartExecAlgorithmForTesting({
            'maker_fee_rate': 0.0002,
            'taker_fee_rate': 0.0005,
        })
        assert algo._calculate_fee(50000.0, 'taker') == 25.0  # 50000 * 0.0005

    def test_slippage_calculation(self):
        """Test slippage calculation in basis points."""
        algo = SmartExecAlgorithmForTesting({})
        # BUY: expected 50000, got 50050 (10bps worse)
        assert algo._calculate_slippage_bps(50000.0, 50050.0, OrderSide.BUY) == 10.0
        # SELL: expected 50000, got 49950 (10bps worse)
        assert algo._calculate_slippage_bps(50000.0, 49950.0, OrderSide.SELL) == 10.0

    def test_get_metrics_summary(self):
        """Test metrics summary generation."""
        algo = SmartExecAlgorithmForTesting({})
        # Seed a few raw counters, then check the derived summary fields.
        algo._metrics['entries_maker'] = 8
        algo._metrics['entries_taker'] = 2
        algo._metrics['total_fees'] = 100.0
        algo._metrics['fill_count'] = 10
        summary = algo.get_metrics_summary()
        assert summary['entries']['total'] == 10
        assert summary['entries']['maker_pct'] == 80.0
        assert summary['fees']['total'] == 100.0

    def test_reset_metrics(self):
        """Test metrics reset functionality."""
        algo = SmartExecAlgorithmForTesting({})
        algo._metrics['entries_maker'] = 5
        algo._metrics['total_fees'] = 50.0
        algo.reset_metrics()
        assert algo._metrics['entries_maker'] == 0
        assert algo._metrics['total_fees'] == 0.0
class TestSmartExecAlgorithmIntegration:
    """Fill recording: fees and counts attributed to entry vs. exit buckets."""

    def test_entry_fill_tracked_correctly(self):
        """An entry fill updates the maker-entry count, total fees, and fill count."""
        algo = SmartExecAlgorithmForTesting({'maker_fee_rate': 0.0002})
        entry_order = Mock()
        entry_order.side = OrderSide.BUY
        entry_order.tags = {'type': 'entry', 'expected_price': 50000.0}
        # Filled at the expected price — zero slippage case.
        algo._record_fill(entry_order, 50000.0, 1.0, 'maker')
        assert algo._metrics['entries_maker'] == 1
        assert algo._metrics['total_fees'] == 10.0  # 50000 * 0.0002
        assert algo._metrics['fill_count'] == 1

    def test_exit_fill_tracked_correctly(self):
        """An exit fill increments only the exit bucket, never the entry bucket."""
        algo = SmartExecAlgorithmForTesting({'maker_fee_rate': 0.0002})
        exit_order = Mock()
        exit_order.side = OrderSide.SELL
        exit_order.tags = {'type': 'exit'}
        algo._record_fill(exit_order, 51000.0, 1.0, 'maker')
        assert algo._metrics['exits_maker'] == 1
        assert algo._metrics['entries_maker'] == 0  # entry bucket untouched

View File

@@ -0,0 +1,118 @@
"""Tests for DolphinExecutionStrategy."""
import pytest
from collections import deque
from unittest.mock import Mock
from nautilus_dolphin.nautilus.strategy import DolphinExecutionStrategyForTesting as DolphinExecutionStrategy
def _make_strategy_vol_ready(strategy, high=True):
"""Set vol regime state. high=True → regime is high (vol above p60)."""
strategy._vol_regime_ready = True
strategy._vol_p60 = 0.0001
strategy._current_vol = 0.01 if high else 0.000001
strategy._bar_count_in_day = 200 # past warmup
class TestDolphinExecutionStrategy:
# Gate-by-gate tests for _should_trade: each gate returns a rejection-reason
# string and "" means the signal passes. Plus leverage/sizing math checks.
def test_should_trade_vol_regime_filter(self):
"""Layer 2: vol regime not ready → reject."""
strategy = DolphinExecutionStrategy({})
# Default state: _vol_regime_ready=False → vol_regime_not_high
signal = {'vel_div': -0.03, 'asset': 'BTCUSDT', 'irp_alignment': 0.50}
assert strategy._should_trade(signal) == "vol_regime_not_high"
def test_should_trade_irp_filter(self):
"""Layer 3: IRP alignment filter (threshold appears to sit between 0.30 and 0.50)."""
strategy = DolphinExecutionStrategy({})
_make_strategy_vol_ready(strategy)
signal_low_irp = {'vel_div': -0.03, 'asset': 'BTCUSDT', 'irp_alignment': 0.30}
assert strategy._should_trade(signal_low_irp) == "irp_too_low"
signal_good = {'vel_div': -0.03, 'asset': 'BTCUSDT', 'irp_alignment': 0.50}
assert strategy._should_trade(signal_good) == ""
def test_should_trade_direction_contradicted(self):
"""Layer 6: rising BTC price contradicts SHORT signal."""
strategy = DolphinExecutionStrategy({})
_make_strategy_vol_ready(strategy)
# BTC rose: p0=10000 (lookback_bars=7+1 ago) → p_now=10100 → +100bps → contradict
prices = [10000.0] * 5 + [10100.0] * 5
# Seed the BTC price history deque directly; maxlen mirrors the strategy's own sizing.
strategy._btc_dc_prices = deque(prices, maxlen=strategy.dc_lookback_bars + 2)
signal = {'vel_div': -0.03, 'asset': 'BTCUSDT', 'irp_alignment': 0.50}
assert strategy._should_trade(signal) == "direction_contradicted"
def test_should_trade_excluded_assets(self):
"""Asset exclusion filter."""
strategy = DolphinExecutionStrategy({})
_make_strategy_vol_ready(strategy)
signal = {'vel_div': -0.03, 'asset': 'TUSDUSDT', 'irp_alignment': 0.50}
assert strategy._should_trade(signal) == "asset_excluded"
def test_should_trade_position_limits(self):
"""Max concurrent positions gate."""
config = {'max_concurrent_positions': 2}
strategy = DolphinExecutionStrategy(config)
_make_strategy_vol_ready(strategy)
strategy.active_positions = {'BTCUSDT': {}, 'ETHUSDT': {}}
signal = {'vel_div': -0.03, 'asset': 'SOLUSDT', 'irp_alignment': 0.50}
assert strategy._should_trade(signal) == "max_positions_reached"
def test_should_trade_existing_position(self):
"""Duplicate position guard."""
strategy = DolphinExecutionStrategy({})
_make_strategy_vol_ready(strategy)
strategy.active_positions = {'BTCUSDT': {}}
signal = {'vel_div': -0.03, 'asset': 'BTCUSDT', 'irp_alignment': 0.50}
assert strategy._should_trade(signal) == "position_already_exists"
def test_calculate_leverage_base(self):
"""Layer 4: cubic-convex dynamic leverage from vel_div."""
config = {'min_leverage': 0.5, 'max_leverage': 5.0, 'leverage_convexity': 3.0}
strategy = DolphinExecutionStrategy(config)
# Near threshold (low strength): vel_div=-0.021 → strength≈0.03
lev_low = strategy.calculate_leverage({'vel_div': -0.021})
assert 0.5 <= lev_low < 1.0
# Near extreme (high strength): vel_div=-0.048 → strength≈0.93
lev_high = strategy.calculate_leverage({'vel_div': -0.048})
assert lev_high > 3.0
def test_calculate_leverage_with_multipliers(self):
"""Layer 5: alpha layer multipliers applied to base leverage."""
config = {'min_leverage': 0.5, 'max_leverage': 5.0, 'leverage_convexity': 3.0}
strategy = DolphinExecutionStrategy(config)
# vel_div = -0.035 → strength = 0.5 → scaled = 0.125 → base_lev = 1.0625
signal = {
'vel_div': -0.035,
'bucket_boost': 1.2,
'streak_mult': 1.1,
'trend_mult': 1.05,
}
lev = strategy.calculate_leverage(signal)
# min_lev + strength**convexity * (max_lev - min_lev), then the three multipliers.
base_lev = 0.5 + (0.5 ** 3.0) * (5.0 - 0.5) # 1.0625
expected = base_lev * 1.2 * 1.1 * 1.05
assert abs(lev - expected) < 0.01
def test_calculate_position_size(self):
"""Position size: balance * fraction * leverage."""
strategy = DolphinExecutionStrategy({'capital_fraction': 0.20})
notional = strategy.calculate_position_size({'vel_div': -0.035}, 10000.0)
assert notional > 0
assert notional <= 10000.0 * 0.5 # sanity cap
def test_position_size_sanity_cap(self):
"""50% of balance hard cap even with high multipliers."""
strategy = DolphinExecutionStrategy({'capital_fraction': 0.50, 'max_leverage': 10.0})
signal = {
'vel_div': -0.08,
'bucket_boost': 2.0,
'streak_mult': 2.0,
'trend_mult': 2.0,
}
notional = strategy.calculate_position_size(signal, 10000.0)
assert notional <= 10000.0 * 0.5

View File

@@ -0,0 +1,204 @@
"""Tests for Strategy Registration.
Uses proper Nautilus initialization pattern from test_0_nautilus_bootstrap.py
"""
import pytest
from unittest.mock import Mock, patch
class TestDolphinStrategyConfig:
    """DolphinStrategyConfig defaults, keyword overrides, and dict export."""

    def test_default_config(self):
        """Every documented default must hold on a freshly constructed config."""
        from nautilus_dolphin.nautilus.strategy_registration import DolphinStrategyConfig
        cfg = DolphinStrategyConfig()
        expected_defaults = {
            'venue': "BINANCE_FUTURES",
            'irp_alignment_min': 0.45,
            'momentum_magnitude_min': 0.000075,
            'excluded_assets': ['TUSDUSDT', 'USDCUSDT'],
            'min_leverage': 0.5,
            'max_leverage': 5.0,
            'leverage_convexity': 3.0,
            'capital_fraction': 0.20,
            'tp_bps': 99,
            'max_hold_bars': 120,
            'max_concurrent_positions': 10,
            'daily_loss_limit_pct': 10.0,
        }
        for field_name, expected in expected_defaults.items():
            assert getattr(cfg, field_name) == expected, field_name
        assert cfg.acb_enabled is True

    def test_custom_config(self):
        """Constructor keyword arguments override the defaults."""
        from nautilus_dolphin.nautilus.strategy_registration import DolphinStrategyConfig
        cfg = DolphinStrategyConfig(
            venue="BINANCE_SPOT",
            max_leverage=3.0,
            acb_enabled=False,
            tp_bps=150,
        )
        assert cfg.venue == "BINANCE_SPOT"
        assert cfg.max_leverage == 3.0
        assert cfg.acb_enabled is False
        assert cfg.tp_bps == 150

    def test_to_dict(self):
        """to_dict() exports overridden values alongside untouched defaults."""
        from nautilus_dolphin.nautilus.strategy_registration import DolphinStrategyConfig
        cfg = DolphinStrategyConfig(max_leverage=3.0)
        exported = cfg.to_dict()
        assert exported['venue'] == "BINANCE_FUTURES"
        assert exported['max_leverage'] == 3.0
        assert 'acb_enabled' in exported
class TestStrategyConfigCreation:
"""Test creation of Nautilus ImportableStrategyConfig."""
def test_create_strategy_config(self):
"""Test creating ImportableStrategyConfig."""
from nautilus_dolphin.nautilus.strategy_registration import (
DolphinStrategyConfig, create_strategy_config
)
config = DolphinStrategyConfig(max_leverage=3.0)
strategy_config = create_strategy_config(config)
assert strategy_config is not None
assert strategy_config.strategy_path == 'nautilus_dolphin.nautilus.strategy:DolphinExecutionStrategy'
# NOTE(review): config_path is asserted equal to strategy_path. Nautilus
# convention usually points config_path at the *Config* class
# (e.g. ...:DolphinExecutionStrategyConfig) — confirm this is intentional.
assert strategy_config.config_path == 'nautilus_dolphin.nautilus.strategy:DolphinExecutionStrategy'
def test_create_strategy_config_content(self):
"""Test creating ImportableStrategyConfig has correct content."""
from nautilus_dolphin.nautilus.strategy_registration import (
DolphinStrategyConfig, create_strategy_config
)
config = DolphinStrategyConfig(max_leverage=3.0)
strategy_config = create_strategy_config(config)
# Overridden value must survive the round trip into the importable config dict.
assert strategy_config.config['max_leverage'] == 3.0
assert strategy_config.strategy_path == 'nautilus_dolphin.nautilus.strategy:DolphinExecutionStrategy'
def test_create_default_strategy_config(self):
"""Test creating default strategy config."""
from nautilus_dolphin.nautilus.strategy_registration import create_default_strategy_config
strategy_config = create_default_strategy_config()
assert strategy_config is not None
assert strategy_config.strategy_path == 'nautilus_dolphin.nautilus.strategy:DolphinExecutionStrategy'
class TestStrategyRegistry:
"""Test StrategyRegistry for managing multiple strategies."""
# The registry maps string ids → DolphinStrategyConfig and can emit
# importable configs for backtest parameter sweeps.
def test_register_single_strategy(self):
"""Test registering a single strategy."""
from nautilus_dolphin.nautilus.strategy_registration import (
StrategyRegistry, DolphinStrategyConfig
)
registry = StrategyRegistry()
config = DolphinStrategyConfig()
strategy_config = registry.register("dolphin_001", config)
assert strategy_config is not None
assert len(registry.get_configs()) == 1
assert registry.get_config("dolphin_001") == config
def test_register_multiple_strategies(self):
"""Test registering multiple strategies."""
from nautilus_dolphin.nautilus.strategy_registration import (
StrategyRegistry, DolphinStrategyConfig
)
registry = StrategyRegistry()
config1 = DolphinStrategyConfig(max_leverage=3.0)
config2 = DolphinStrategyConfig(max_leverage=5.0)
registry.register("dolphin_3x", config1)
registry.register("dolphin_5x", config2)
assert len(registry.get_configs()) == 2
# Each id must resolve back to its own (distinct) config.
assert registry.get_config("dolphin_3x").max_leverage == 3.0
assert registry.get_config("dolphin_5x").max_leverage == 5.0
def test_create_backtest_strategies_default(self):
"""Test creating backtest strategies with default config."""
from nautilus_dolphin.nautilus.strategy_registration import StrategyRegistry
registry = StrategyRegistry()
configs = registry.create_backtest_strategies()
assert len(configs) == 1
def test_create_backtest_strategies_variations(self):
"""Test creating backtest strategies with parameter variations."""
from nautilus_dolphin.nautilus.strategy_registration import StrategyRegistry
registry = StrategyRegistry()
# One importable config per variation dict.
variations = [
{'max_leverage': 3.0, 'tp_bps': 50},
{'max_leverage': 5.0, 'tp_bps': 99},
]
configs = registry.create_backtest_strategies(variations)
assert len(configs) == 2
class TestStrategyConfigFromDict:
    """Building an importable strategy config from a plain dictionary."""

    def test_create_from_dict(self):
        """Known keys map to config fields; unknown keys pass through into the config dict."""
        from nautilus_dolphin.nautilus.strategy_registration import (
            create_strategy_configs_from_dict
        )
        raw = {
            'venue': 'BINANCE_FUTURES',
            'max_leverage': 3.0,
            'acb_enabled': False,
            'custom_param': 'value',  # unrecognized key → routed to extra_config
        }
        built = create_strategy_configs_from_dict(raw)
        assert built is not None
        # The pass-through key must be reachable in the final config dict.
        assert built.config.get('custom_param') == 'value'
class TestStrategyIntegrationWithLauncher:
    """The launcher must accept and retain a nested strategy configuration."""

    def test_launcher_with_strategy_config(self):
        """The 'strategy' sub-dict is kept verbatim on launcher.config."""
        from nautilus_dolphin.nautilus.launcher import NautilusDolphinLauncher
        strategy_section = {
            'venue': 'BINANCE_FUTURES',
            'max_leverage': 3.0,
            'acb_enabled': True,
        }
        launcher = NautilusDolphinLauncher({
            'venue': 'BINANCE_FUTURES',
            'environment': 'BACKTEST',
            'trader_id': 'TEST-001',
            'strategy': strategy_section,
        })
        assert 'strategy' in launcher.config
        assert launcher.config['strategy']['max_leverage'] == 3.0

View File

@@ -0,0 +1,261 @@
"""
test_subday_exit_fix.py — Regression tests for the ghost trade / subday exit fix.
Root cause: update_acb_boost() called evaluate_subday_exits() → _execute_exit()
directly from a non-scan thread, bypassing _process_scan result processing.
Position was cleared silently with no EXIT log ("ghost trade").
Fix: update_acb_boost() calls evaluate_subday_exits() immediately and returns
the exit dict. Caller (nautilus_event_trader.py / dolphin_actor.py) logs it
at the call site — no scan-cadence delay, no flag deferral.
Tests verify:
1. Exit returned immediately (not deferred) on boost drop
2. Exit NOT returned when boost doesn't meet the trigger condition
3. update_acb_boost() return value contains correct reason + trade_id
4. Position cleared immediately after update_acb_boost() on trigger
5. No double-exit: second call after exit already done returns None
6. TP/SL still fire via _manage_position — unaffected
7. reset() leaves engine clean (no stale state)
8. No-position noop: evaluate_subday_exits() returns None when flat
9. MAX_HOLD still fires at correct bar
10. No re-entry on same bar as immediate exit
Run with:
source /home/dolphin/siloqy_env/bin/activate
cd /mnt/dolphinng5_predict
python -m pytest nautilus_dolphin/tests/test_subday_exit_fix.py -v
"""
import sys
import unittest
from pathlib import Path
HCM_DIR = Path(__file__).parent.parent.parent
sys.path.insert(0, str(HCM_DIR / "nautilus_dolphin"))
from nautilus_dolphin.nautilus.proxy_boost_engine import create_d_liq_engine
def _make_engine_with_position(entry_price: float = 100.0, vel_div: float = -0.05):
    """Return a live D_LIQ engine holding a SHORT opened at *entry_price*.

    Warms the engine past its lookback window with neutral vel_div bars,
    then fires one triggering bar so exactly one position is open on return.
    """
    engine = create_d_liq_engine(
        max_leverage=8.0,
        abs_max_leverage=9.0,
        fraction=0.20,
        fixed_tp_pct=0.0095,
        stop_pct=1.0,  # effectively disabled
        max_hold_bars=250,
        vel_div_threshold=-0.020,
        vel_div_extreme=-0.050,
        use_asset_selection=False,
        use_ob_edge=False,
        use_alpha_layers=False,
        use_dynamic_leverage=False,
        lookback=10,  # small lookback keeps the warmup short
        seed=42,
    )
    engine.begin_day('2026-04-12', posture='APEX')
    prices = {'BTCUSDT': entry_price}
    # Warm past the lookback window with a no-signal vel_div.
    for bar in range(12):
        engine.step_bar(bar_idx=bar, vel_div=0.005, prices=prices, vol_regime_ok=True)
    # Single triggering bar — must produce the entry.
    outcome = engine.step_bar(bar_idx=12, vel_div=vel_div, prices=prices, vol_regime_ok=True)
    assert outcome.get('entry') is not None, "Setup failed: no entry on trigger bar"
    assert engine.position is not None, "Setup failed: position not set after entry"
    return engine
class TestSubdayImmediateExit(unittest.TestCase):
# Regression coverage for the ghost-trade fix: update_acb_boost() must
# return the exit dict immediately (no deferred flag) when the boost-drop
# trigger (old >= 1.25, new < 1.10) fires, and leave every other exit
# path (FIXED_TP, MAX_HOLD) untouched.
def test_exit_returned_immediately_on_boost_drop(self):
"""update_acb_boost: old>=1.25, new<1.10 → exit dict returned immediately, position cleared."""
eng = _make_engine_with_position()
eng._day_base_boost = 1.30 # simulate stressed day
exit_info = eng.update_acb_boost(boost=0.95, beta=0.0)
self.assertIsNotNone(exit_info, "Exit dict must be returned immediately on boost drop")
self.assertEqual(exit_info['reason'], 'SUBDAY_ACB_NORMALIZATION')
self.assertIsNotNone(exit_info.get('trade_id'), "Exit must carry trade_id for caller logging")
# Position must be cleared immediately — no deferred flag
self.assertIsNone(eng.position, "Position must be cleared immediately by update_acb_boost")
def test_no_exit_when_condition_not_met(self):
"""update_acb_boost: condition not met → None returned, position intact."""
eng = _make_engine_with_position()
# old_boost < 1.25 → condition fails
eng._day_base_boost = 1.0
result = eng.update_acb_boost(boost=0.95, beta=0.0)
self.assertIsNone(result, "None must be returned when old_boost < 1.25")
self.assertIsNotNone(eng.position, "Position must be intact when condition not met")
# new_boost >= 1.10 → condition fails
eng._day_base_boost = 1.30
result = eng.update_acb_boost(boost=1.15, beta=0.0)
self.assertIsNone(result, "None must be returned when new_boost >= 1.10")
self.assertIsNotNone(eng.position, "Position must be intact when condition not met")
def test_exit_dict_has_expected_fields(self):
"""Exit dict must carry all fields callers need for logging and CH write."""
eng = _make_engine_with_position()
eng._day_base_boost = 1.30
exit_info = eng.update_acb_boost(boost=0.95, beta=0.0)
self.assertIsNotNone(exit_info)
for field in ('trade_id', 'reason', 'pnl_pct', 'net_pnl', 'bars_held'):
self.assertIn(field, exit_info, f"Exit dict missing field: {field}")
self.assertEqual(exit_info['reason'], 'SUBDAY_ACB_NORMALIZATION')
def test_no_double_exit_second_call(self):
"""After immediate exit, a second boost drop call returns None (position already gone)."""
eng = _make_engine_with_position()
eng._day_base_boost = 1.30
first = eng.update_acb_boost(boost=0.95, beta=0.0)
self.assertIsNotNone(first)
# Second call — no position remaining
eng._day_base_boost = 1.30
second = eng.update_acb_boost(boost=0.90, beta=0.0)
self.assertIsNone(second, "No second exit possible when no position open")
def test_step_bar_after_immediate_exit_has_no_exit(self):
"""After update_acb_boost clears position, next step_bar has no exit in result."""
eng = _make_engine_with_position()
eng._day_base_boost = 1.30
eng.update_acb_boost(boost=0.95, beta=0.0)
prices = {'BTCUSDT': 99.0}
result = eng.step_bar(bar_idx=13, vel_div=0.0, prices=prices, vol_regime_ok=True)
self.assertIsNone(result.get('exit'), "No duplicate exit on next bar after immediate exit")
self.assertIsNone(eng.position)
def test_evaluate_subday_exits_no_position_returns_none(self):
"""evaluate_subday_exits() returns None when no position is open — no crash."""
eng = create_d_liq_engine(
max_leverage=8.0, abs_max_leverage=9.0,
fraction=0.20, fixed_tp_pct=0.0095, stop_pct=1.0,
max_hold_bars=250, vel_div_threshold=-0.020,
vel_div_extreme=-0.050, use_asset_selection=False,
use_ob_edge=False, use_alpha_layers=False,
use_dynamic_leverage=False, lookback=10, seed=42,
)
eng.begin_day('2026-04-12', posture='APEX')
self.assertIsNone(eng.position)
result = eng.evaluate_subday_exits()
self.assertIsNone(result, "Must return None when no position open")
def test_reset_leaves_engine_clean(self):
"""reset() after immediate exit leaves engine in clean state."""
eng = _make_engine_with_position()
eng._day_base_boost = 1.30
eng.update_acb_boost(boost=0.95, beta=0.0)
eng.reset()
self.assertIsNone(eng.position)
# No _pending_subday_exit attribute should exist (flag removed)
# NOTE(review): `hasattr(x, a) and getattr(x, a, False)` is redundant —
# `getattr(eng, '_pending_subday_exit', False)` alone expresses the same check.
self.assertFalse(hasattr(eng, '_pending_subday_exit') and getattr(eng, '_pending_subday_exit', False),
"_pending_subday_exit flag must not be set after reset()")
def test_normal_tp_exit_unaffected(self):
"""FIXED_TP still fires via _manage_position — immediate exit path does not interfere."""
eng = _make_engine_with_position(entry_price=100.0)
# Drive price below TP level for SHORT (entry=100, tp_pct=0.0095 → tp=99.05)
prices = {'BTCUSDT': 99.0} # 1% below entry → above tp threshold
result = eng.step_bar(bar_idx=13, vel_div=0.0, prices=prices, vol_regime_ok=True)
self.assertIsNotNone(result.get('exit'))
self.assertEqual(result['exit']['reason'], 'FIXED_TP',
"TP exit must still fire normally via _manage_position")
self.assertIsNone(eng.position)
def test_max_hold_exit_unaffected(self):
"""MAX_HOLD still fires at the correct bar — not affected by the fix."""
eng = create_d_liq_engine(
max_leverage=8.0, abs_max_leverage=9.0,
fraction=0.20, fixed_tp_pct=0.0095, stop_pct=1.0,
max_hold_bars=5, # tiny max_hold for fast test
vel_div_threshold=-0.020, vel_div_extreme=-0.050,
use_asset_selection=False, use_ob_edge=False,
use_alpha_layers=False, use_dynamic_leverage=False,
lookback=10, seed=42,
)
eng.begin_day('2026-04-12', posture='APEX')
prices = {'BTCUSDT': 100.0}
# Warmup
for i in range(12):
eng.step_bar(bar_idx=i, vel_div=0.005, prices=prices, vol_regime_ok=True)
# Entry
entry_result = eng.step_bar(bar_idx=12, vel_div=-0.05, prices=prices, vol_regime_ok=True)
self.assertIsNotNone(entry_result.get('entry'))
# Advance to max_hold bar (price stays flat — no TP)
exit_result = None
for i in range(13, 13 + 6):
r = eng.step_bar(bar_idx=i, vel_div=0.0, prices=prices, vol_regime_ok=True)
if r.get('exit'):
exit_result = r
break
self.assertIsNotNone(exit_result, "MAX_HOLD must fire within max_hold_bars")
self.assertEqual(exit_result['exit']['reason'], 'MAX_HOLD')
def test_no_reentry_on_same_logical_bar_after_immediate_exit(self):
"""After immediate exit via update_acb_boost, step_bar must not immediately re-enter."""
eng = _make_engine_with_position(entry_price=100.0)
eng._day_base_boost = 1.30
eng.update_acb_boost(boost=0.95, beta=0.0)
# Trigger bar with a strong signal
prices = {'BTCUSDT': 100.0}
result = eng.step_bar(bar_idx=13, vel_div=-0.06, prices=prices, vol_regime_ok=True)
# No position and no re-entry on the bar immediately following immediate exit
self.assertIsNone(result.get('entry'),
"Must not re-enter on the bar immediately after immediate subday exit")
class TestSubdayExitScanCadence(unittest.TestCase):
    """Verify TP/SL are never delayed — they are always in _manage_position path.

    SUBDAY_ACB_NORMALIZATION now exits immediately via update_acb_boost()'s
    return value; TP/SL/MAX_HOLD keep exiting via _manage_position on every
    scan and must be completely unaffected.
    """

    def test_tp_fires_at_scan_cadence_no_delay(self):
        """TP fires on the very next scan — the subday path does not interfere."""
        engine = _make_engine_with_position(entry_price=100.0)
        outcome = engine.step_bar(
            bar_idx=13,
            vel_div=0.0,
            prices={'BTCUSDT': 99.0},  # below the SHORT TP level
            vol_regime_ok=True,
        )
        self.assertEqual(outcome['exit']['reason'], 'FIXED_TP')

    def test_tp_fires_normally_when_no_boost_drop(self):
        """A boost update that misses the trigger leaves the TP path untouched."""
        engine = _make_engine_with_position(entry_price=100.0)
        engine._day_base_boost = 1.0  # below the 1.25 trigger — no subday exit
        subday_exit = engine.update_acb_boost(boost=0.95, beta=0.0)
        self.assertIsNone(subday_exit, "No subday exit when old_boost < 1.25")
        self.assertIsNotNone(engine.position, "Position still open")
        # TP then fires normally on the next bar.
        outcome = engine.step_bar(
            bar_idx=13,
            vel_div=0.0,
            prices={'BTCUSDT': 99.0},
            vol_regime_ok=True,
        )
        self.assertEqual(outcome['exit']['reason'], 'FIXED_TP')
# Allow direct execution (outside pytest) with a verbose unittest runner.
if __name__ == '__main__':
unittest.main(verbosity=2)

View File

@@ -0,0 +1,464 @@
"""
CRITICAL: Trade-by-Trade Validation Test
=========================================
This test runs a Nautilus-Dolphin backtest and compares EVERY TRADE
to the standalone DOLPHIN (itest_v7) reference results.
MUST MATCH with 0.1% tolerance:
- Entry prices
- Exit prices
- P&L calculations
- Exit types
- Bars held
"""
import json
import pytest
import asyncio
from pathlib import Path
from typing import Dict, List, Any, Tuple
from dataclasses import dataclass, asdict
from datetime import datetime
# ── Configuration ────────────────────────────────────────────────────────────
# Reference artifacts produced by the standalone itest_v7 run; the paths
# resolve three directory levels above this test file (the repo root).
REFERENCE_RESULTS_FILE = Path(__file__).parent.parent.parent / "itest_v7_results.json"
REFERENCE_TRADES_FILE = Path(__file__).parent.parent.parent / "itest_v7_trades.jsonl"
REFERENCE_STRATEGY = "tight_3_3"
# Tolerance for floating point comparisons
PRICE_TOLERANCE = 0.001 # 0.1%
PNL_TOLERANCE = 0.001 # 0.1%
@dataclass
class TradeComparison:
"""Detailed trade comparison record."""
# One reference-vs-ND trade pair. ref_* fields come from itest_v7,
# nd_* from the Nautilus-Dolphin run; *_diff_pct are relative deltas,
# *_match are exact-equality flags, and `passed` is the overall verdict.
trade_idx: int
asset: str
ref_entry: float
nd_entry: float
entry_diff_pct: float
ref_exit: float
nd_exit: float
exit_diff_pct: float
ref_pnl: float
nd_pnl: float
pnl_diff_pct: float
ref_exit_type: str
nd_exit_type: str
exit_type_match: bool
ref_bars: int
nd_bars: int
bars_match: bool
passed: bool
class TestTradeByTradeValidation:
"""
CRITICAL TEST: Validates EVERY trade matches between ND and standalone.
This test:
1. Loads reference trades from itest_v7
2. Runs ND backtest with identical configuration
3. Compares trade-by-trade
4. Reports ANY discrepancies
"""
@pytest.fixture(scope="class")
def reference_data(self):
"""Load reference data from itest_v7."""
if not REFERENCE_RESULTS_FILE.exists() or not REFERENCE_TRADES_FILE.exists():
pytest.skip("Reference data not available")
# Load results
with open(REFERENCE_RESULTS_FILE, 'r') as f:
results = json.load(f)
# Load trades (JSONL — one trade per line, filtered to the reference strategy)
trades = []
with open(REFERENCE_TRADES_FILE, 'r') as f:
for line in f:
data = json.loads(line.strip())
if data.get('strategy') == REFERENCE_STRATEGY:
trades.append(data)
return {
'results': results['strategies'][REFERENCE_STRATEGY],
'trades': trades
}
def test_critical_reference_data_loaded(self, reference_data):
"""CRITICAL: Verify reference data is loaded correctly."""
ref_results = reference_data['results']
ref_trades = reference_data['trades']
print(f"\n{'='*70}")
print("CRITICAL: Reference Data Validation")
print(f"{'='*70}")
print(f"Strategy: {REFERENCE_STRATEGY}")
print(f"Total Trades: {len(ref_trades)}")
print(f"Reference Trade Count: {ref_results['trades']}")
print(f"Win Rate: {ref_results['win_rate']:.2f}%")
print(f"ROI: {ref_results['roi_pct']:.2f}%")
print(f"Profit Factor: {ref_results['profit_factor']:.4f}")
# CRITICAL: Must have trades to compare
assert len(ref_trades) > 0, "CRITICAL: No reference trades loaded"
assert len(ref_trades) == ref_results['trades'], "Trade count mismatch"
# Store for later tests
# NOTE(review): attaching state to the pytest module object is fragile and
# order-dependent; the class-scoped `reference_data` fixture already shares
# this data — consider removing these two lines.
pytest.reference_results = ref_results
pytest.reference_trades = ref_trades
def test_critical_nd_configuration_matches_reference(self):
"""CRITICAL: Verify ND configuration matches itest_v7 exactly."""
from nautilus_dolphin.nautilus.strategy_registration import DolphinStrategyConfig
# tight_3_3 configuration from itest_v7
expected_config = {
'venue': 'BINANCE_FUTURES',
'max_leverage': 2.5,
'capital_fraction': 0.15,
'max_hold_bars': 120,
'irp_alignment_min': 0.45,
'momentum_magnitude_min': 0.000075,
'tp_bps': 99,
}
nd_config = DolphinStrategyConfig(
venue=expected_config['venue'],
max_leverage=expected_config['max_leverage'],
capital_fraction=expected_config['capital_fraction'],
max_hold_bars=expected_config['max_hold_bars'],
irp_alignment_min=expected_config['irp_alignment_min'],
momentum_magnitude_min=expected_config['momentum_magnitude_min'],
tp_bps=expected_config['tp_bps'],
)
print(f"\n{'='*70}")
print("CRITICAL: Configuration Validation")
print(f"{'='*70}")
# Field-by-field check; asserts inside the loop so the first mismatch names its key.
for key, expected_value in expected_config.items():
actual_value = getattr(nd_config, key)
match = actual_value == expected_value
status = "✓" if match else "✗"
print(f"{status} {key}: {actual_value} (expected: {expected_value})")
assert match, f"Configuration mismatch: {key}"
def test_critical_sample_trades_structure(self, reference_data):
"""CRITICAL: Examine structure of sample trades."""
ref_trades = reference_data['trades']
print(f"\n{'='*70}")
print("CRITICAL: Sample Trade Structure")
print(f"{'='*70}")
for i, trade in enumerate(ref_trades[:5]):
print(f"\nTrade {i+1}: {trade['trade_asset']} {trade['direction']}")
print(f" Entry: ${trade['entry_price']:.2f}")
print(f" Exit: ${trade['exit_price']:.2f}")
print(f" Net P&L: ${trade['net_pnl']:.4f}")
print(f" Exit Type: {trade['exit_type']}")
print(f" Bars Held: {trade['bars_held']}")
# Validate required fields exist
required_fields = [
'trade_asset', 'entry_price', 'exit_price', 'net_pnl',
'exit_type', 'bars_held', 'direction'
]
for field in required_fields:
assert field in trade, f"Missing field: {field}"
@pytest.mark.timeout(300) # 5 minute timeout
def test_critical_trade_counts_match(self, reference_data):
"""CRITICAL: ND must produce same number of trades as reference."""
ref_results = reference_data['results']
expected_count = ref_results['trades']
print(f"\n{'='*70}")
print("CRITICAL: Trade Count Validation")
print(f"{'='*70}")
print(f"Expected trades (itest_v7): {expected_count}")
# TODO: Run ND backtest and get actual count
# For now, validate the test framework
# NOTE(review): placeholder — this test currently cannot fail on the
# comparison it is named after; it only exercises reference loading.
print(f"⚠️ ND backtest not yet run - test framework validated")
# This test will pass once ND backtest is implemented
# nd_count = run_nd_backtest_and_get_trade_count()
# assert nd_count == expected_count, f"Trade count mismatch: {nd_count} vs {expected_count}"
def test_critical_first_50_trades_sample(self, reference_data):
"""CRITICAL: Detailed validation of first 50 trades."""
ref_trades = reference_data['trades']
print(f"\n{'='*70}")
print("CRITICAL: First 50 Trades Analysis")
print(f"{'='*70}")
sample = ref_trades[:50]
# Analyze sample
assets = set(t['trade_asset'] for t in sample)
exit_types = {}
for t in sample:
et = t['exit_type']
exit_types[et] = exit_types.get(et, 0) + 1
print(f"Assets traded: {assets}")
print(f"Exit type distribution:")
for et, count in sorted(exit_types.items(), key=lambda x: -x[1]):
print(f" {et}: {count} ({100*count/len(sample):.1f}%)")
# Validate P&L calculations
total_pnl = sum(t['net_pnl'] for t in sample)
winners = sum(1 for t in sample if t['net_pnl'] > 0)
print(f"\nSample Statistics:")
print(f" Total P&L: ${total_pnl:.2f}")
print(f" Winners: {winners}/{len(sample)} ({100*winners/len(sample):.1f}%)")
assert len(sample) == 50, "Sample size mismatch"
@pytest.mark.timeout(600) # 10 minute timeout for full comparison
def test_critical_full_trade_by_trade_comparison(self, reference_data):
"""
CRITICAL: Full trade-by-trade comparison.
This is the MOST IMPORTANT test - validates EVERY trade matches.
"""
ref_trades = reference_data['trades']
ref_results = reference_data['results']
print(f"\n{'='*70}")
print("CRITICAL: Full Trade-by-Trade Comparison")
print(f"{'='*70}")
print(f"Reference trades to validate: {len(ref_trades)}")
print(f"Tolerance: {PRICE_TOLERANCE*100:.2f}% for prices, {PNL_TOLERANCE*100:.2f}% for P&L")
# TODO: Load ND backtest results
# nd_trades = run_nd_backtest_and_get_trades()
# For now, create a placeholder comparison report
# NOTE(review): placeholder — see test_critical_trade_counts_match.
print(f"\n⚠️ COMPARISON FRAMEWORK READY")
print(f" - Reference trades loaded: {len(ref_trades)}")
print(f" - Validation criteria defined")
print(f" - Tolerance levels set")
print(f"\n Next: Run ND backtest to generate comparison data")
# Once ND results are available:
# comparisons = compare_trades(ref_trades, nd_trades)
# report_comparison_results(comparisons)
assert len(ref_trades) > 0, "No reference trades to compare"
def test_critical_exit_type_distribution_match(self, reference_data):
"""CRITICAL: Exit type distribution must match."""
ref_results = reference_data['results']
print(f"\n{'='*70}")
print("CRITICAL: Exit Type Distribution")
print(f"{'='*70}")
total = ref_results['trades']
distributions = {
'trailing': ref_results['trailing_exits'],
'stop': ref_results['stop_exits'],
'target': ref_results['target_exits'],
'hold': ref_results['hold_exits']
}
print("Reference distribution:")
for exit_type, count in distributions.items():
pct = 100 * count / total
print(f" {exit_type}: {count} ({pct:.1f}%)")
# Validate totals: the four exit categories must partition all trades.
total_exits = sum(distributions.values())
assert total_exits == total, f"Exit count mismatch: {total_exits} vs {total}"
def test_critical_profit_loss_calculations(self, reference_data):
"""CRITICAL: P&L calculations must be consistent."""
ref_trades = reference_data['trades']
ref_results = reference_data['results']
print(f"\n{'='*70}")
print("CRITICAL: P&L Calculation Validation")
print(f"{'='*70}")
# Verify aggregate P&L
total_net_pnl = sum(t['net_pnl'] for t in ref_trades)
avg_trade_pnl = total_net_pnl / len(ref_trades)
print(f"Total Net P&L: ${total_net_pnl:.2f}")
print(f"Average per trade: ${avg_trade_pnl:.4f}")
print(f"Winners: {ref_results['wins']} ({ref_results['win_rate']:.2f}%)")
print(f"Profit Factor: {ref_results['profit_factor']:.4f}")
# Validate calculations: win_rate recomputed from wins/trades must agree.
calc_win_rate = 100 * ref_results['wins'] / ref_results['trades']
assert abs(calc_win_rate - ref_results['win_rate']) < 0.1, "Win rate mismatch"
class TestNDTradeGeneration:
    """Test that ND can generate trades comparable to reference."""

    def test_nd_strategy_can_generate_signals(self):
        """Test that ND strategy generates signals."""
        from nautilus_dolphin.nautilus.strategy import DolphinExecutionStrategyForTesting

        config = {
            'venue': 'BINANCE_FUTURES',
            'max_leverage': 2.5,
            'capital_fraction': 0.15,
            'acb_enabled': False,
        }
        strategy = DolphinExecutionStrategyForTesting(config)
        # Two synthetic SHORT candidates expected to clear the entry filters.
        candidates = [
            {
                'asset': 'BTCUSDT',
                'direction': 'SHORT',
                'vel_div': -0.025,
                'strength': 0.75,
                'irp_alignment': 0.5,
                'direction_confirm': True,
                'lookback_momentum': 0.0001,
                'price': 50000.0
            },
            {
                'asset': 'ETHUSDT',
                'direction': 'SHORT',
                'vel_div': -0.03,
                'strength': 0.8,
                'irp_alignment': 0.6,
                'direction_confirm': True,
                'lookback_momentum': 0.00015,
                'price': 3000.0
            }
        ]
        # Set volatility to high regime
        strategy.volatility_detector._regime = 'high'
        # _should_trade returns "" (no rejection reason) when a signal passes.
        valid_signals = [s for s in candidates if strategy._should_trade(s) == ""]
        print(f"\nGenerated {len(valid_signals)} valid signals from {len(candidates)} candidates")
        assert len(valid_signals) > 0, "Strategy should generate valid signals"

    def test_nd_position_sizing_matches_reference(self):
        """Test ND position sizing matches itest_v7."""
        from nautilus_dolphin.nautilus.strategy import DolphinExecutionStrategyForTesting

        strategy = DolphinExecutionStrategyForTesting({
            'venue': 'BINANCE_FUTURES',
            'max_leverage': 2.5,
            'capital_fraction': 0.15,
            'min_leverage': 0.5,
            'leverage_convexity': 3.0,
            'acb_enabled': False,
        })
        # Neutral multipliers so only signal strength drives the sizing math.
        signal = {
            'strength': 0.75,
            'bucket_boost': 1.0,
            'streak_mult': 1.0,
            'trend_mult': 1.0,
        }
        balance = 10000.0
        notional = strategy.calculate_position_size(signal, balance)
        leverage = strategy.calculate_leverage(signal)
        # itest_v7: base_notional = 10000 * 0.15 * 2.5 = 3750
        expected_base = 10000 * 0.15 * 2.5
        print(f"\nPosition Sizing Comparison:")
        print(f" Account: ${balance:,.2f}")
        print(f" ND Notional: ${notional:,.2f}")
        print(f" Expected (itest_v7): ${expected_base:,.2f}")
        print(f" Calculated Leverage: {leverage:.2f}x")
        # Allow for small differences due to convexity
        assert notional > 0, "Notional must be positive"
        assert 0.5 <= leverage <= 5.0, "Leverage must be in valid range"
# ── Helper Functions for Future Implementation ───────────────────────────────
def compare_trades(ref_trades: List[Dict], nd_trades: List[Dict]) -> List[TradeComparison]:
    """
    Compare reference trades to ND trades trade-by-trade.
    This function will be used once ND backtest results are available.

    Pairs trades positionally (zip), so both lists are assumed to be in
    the same order; any surplus trades on either side are not compared.
    """
    results = []
    for idx, (ref, nd) in enumerate(zip(ref_trades, nd_trades)):
        # Relative differences, normalized against the reference values.
        d_entry = abs(ref['entry_price'] - nd['entry_price']) / ref['entry_price']
        d_exit = abs(ref['exit_price'] - nd['exit_price']) / ref['exit_price']
        # Floor the P&L denominator at $0.01 to avoid blow-ups near zero.
        d_pnl = abs(ref['net_pnl'] - nd['net_pnl']) / max(abs(ref['net_pnl']), 0.01)
        same_exit_type = ref['exit_type'] == nd['exit_type']
        same_bars = ref['bars_held'] == nd['bars_held']
        within_tolerance = (
            d_entry <= PRICE_TOLERANCE
            and d_exit <= PRICE_TOLERANCE
            and d_pnl <= PNL_TOLERANCE
            and same_exit_type
            and same_bars
        )
        results.append(TradeComparison(
            trade_idx=idx,
            asset=ref['trade_asset'],
            ref_entry=ref['entry_price'],
            nd_entry=nd['entry_price'],
            entry_diff_pct=d_entry * 100,
            ref_exit=ref['exit_price'],
            nd_exit=nd['exit_price'],
            exit_diff_pct=d_exit * 100,
            ref_pnl=ref['net_pnl'],
            nd_pnl=nd['net_pnl'],
            pnl_diff_pct=d_pnl * 100,
            ref_exit_type=ref['exit_type'],
            nd_exit_type=nd['exit_type'],
            exit_type_match=same_exit_type,
            ref_bars=ref['bars_held'],
            nd_bars=nd['bars_held'],
            bars_match=same_bars,
            passed=within_tolerance,
        ))
    return results
def report_comparison_results(comparisons: "List[TradeComparison]"):
    """
    Print a detailed pass/fail report for a set of trade comparisons.

    Args:
        comparisons: Per-trade comparison records produced by compare_trades.

    Returns:
        bool: True when every comparison passed (vacuously True for an
        empty list, since nothing failed).
    """
    # NOTE: annotation is a string so this module imports cleanly even if
    # TradeComparison is defined later in the file or not imported here.
    total = len(comparisons)
    passed = sum(1 for c in comparisons if c.passed)
    failed = total - passed
    # Guard the percentage math: an empty input must not divide by zero.
    pass_pct = 100 * passed / total if total else 0.0
    fail_pct = 100 * failed / total if total else 0.0
    print(f"\n{'='*70}")
    print("TRADE-BY-TRADE COMPARISON RESULTS")
    print(f"{'='*70}")
    print(f"Total trades compared: {total}")
    print(f"Passed: {passed} ({pass_pct:.1f}%)")
    print(f"Failed: {failed} ({fail_pct:.1f}%)")
    if failed > 0:
        # Show only a handful of failures to keep the report readable.
        print(f"\nFirst 5 failures:")
        for c in [c for c in comparisons if not c.passed][:5]:
            print(f"\n Trade {c.trade_idx}: {c.asset}")
            print(f" Entry diff: {c.entry_diff_pct:.4f}%")
            print(f" Exit diff: {c.exit_diff_pct:.4f}%")
            print(f" P&L diff: {c.pnl_diff_pct:.4f}%")
            print(f" Exit type match: {c.exit_type_match}")
            print(f" Bars match: {c.bars_match}")
    return failed == 0

View File

@@ -0,0 +1,61 @@
"""Tests for VolatilityRegimeDetector."""
import pytest
import numpy as np
from unittest.mock import Mock
from nautilus_dolphin.nautilus.volatility_detector import VolatilityRegimeDetector
class TestVolatilityRegimeDetector:
def test_insufficient_data_returns_true(self):
    """Permissive default when insufficient data."""
    detector = VolatilityRegimeDetector(min_history=100)
    # No bars have been fed in, so the detector falls back to its permissive
    # default. Plain truthiness assert instead of `== True` (PEP 8 / E712);
    # this also tolerates a numpy bool return, which is not identical to True.
    assert detector.is_high_regime()
def test_update_calculates_volatility(self):
    """Test volatility calculation from bars."""
    detector = VolatilityRegimeDetector(lookback_bars=50, min_history=10)
    # Seeded generator keeps this test deterministic across runs; the
    # original used the global unseeded np.random and could vary per run.
    rng = np.random.default_rng(42)
    # Generate synthetic bars with increasing volatility
    for i in range(60):
        bar = Mock()
        bar.close = 100 + rng.standard_normal() * (1 + i * 0.1)
        detector.update(bar)
    assert detector._current_vol is not None
    assert len(detector._volatility_history) > 0
def test_high_regime_detection(self):
    """Test dual-threshold regime detection."""
    detector = VolatilityRegimeDetector(lookback_bars=50, min_history=10)
    # Seeded generator: the low-then-high volatility split below must be
    # reproducible, otherwise this assertion can flake on an unlucky draw
    # from the previously unseeded global np.random state.
    rng = np.random.default_rng(7)
    # Low volatility period
    for _ in range(30):
        bar = Mock()
        bar.close = 100 + rng.standard_normal() * 0.1
        detector.update(bar)
    # High volatility period
    for _ in range(30):
        bar = Mock()
        bar.close = 100 + rng.standard_normal() * 5.0
        detector.update(bar)
    # Should detect high regime (truthiness assert instead of `== True`, E712)
    assert detector.is_high_regime()
def test_regime_info(self):
"""Test regime info dict."""
detector = VolatilityRegimeDetector(lookback_bars=50, min_history=10)
for i in range(60):
bar = Mock()
bar.close = 100 + np.random.randn()
detector.update(bar)
info = detector.get_regime_info()
assert 'status' in info
assert 'current_vol' in info
assert 'p50' in info
assert 'p75' in info