initial: import DOLPHIN baseline 2026-04-21 from dolphinng5_predict working tree
Includes core prod plus the GREEN/BLUE subsystems: prod/ (BLUE harness, configs, scripts, docs); nautilus_dolphin/ (GREEN Nautilus-native implementation, with dvae/ preserved); adaptive_exit/ (AEM engine plus models/bucket_assignments.pkl); Observability/ (EsoF advisor, TUI, dashboards); external_factors/ (EsoF producer); mc_forewarning_qlabs_fork/ (MC regime/envelope). Excludes runtime caches, logs, backups, and reproducible artifacts per .gitignore.
This commit is contained in:
191
nautilus_dolphin/run_all_tests.py
Executable file
191
nautilus_dolphin/run_all_tests.py
Executable file
@@ -0,0 +1,191 @@
"""
Nautilus-Dolphin Test Runner
============================

Runs all tests in sequence and logs results.
Creates a report of passing/failing tests with error details.
"""

import subprocess
import sys
from pathlib import Path
from datetime import datetime
# Test files to run (in order); paths are relative to this script's directory.
TEST_FILES = [
    # Core ACB tests (our new implementation)
    'tests/test_acb_standalone.py',
    'tests/test_adaptive_circuit_breaker.py',

    # Original Nautilus-Dolphin tests
    'tests/test_circuit_breaker.py',
    'tests/test_metrics_monitor.py',
    'tests/test_position_manager.py',
    'tests/test_signal_bridge.py',
    'tests/test_smart_exec_algorithm.py',
    'tests/test_strategy.py',
    'tests/test_volatility_detector.py',

    # Comparison test (requires both)
    'tests/test_acb_nautilus_vs_reference.py',
]
def run_test(test_file, timeout=60):
    """Run a single test file under pytest and return a result record.

    Parameters
    ----------
    test_file : str
        Path to the test file, relative to this script's directory.
    timeout : int, optional
        Seconds to allow the pytest subprocess before aborting (default 60).

    Returns
    -------
    dict
        Keys: 'file', 'status' (one of 'PASS', 'FAIL', 'NOT_FOUND',
        'TIMEOUT', 'ERROR'), 'returncode' (None when the file is missing,
        -1 on timeout/error), 'stdout', 'stderr'.
    """
    print(f"\n{'='*80}")
    print(f"Running: {test_file}")
    print('='*80)

    # Resolve relative to this script so the runner works from any CWD.
    test_path = Path(__file__).parent / test_file

    if not test_path.exists():
        print(f"[SKIP] File not found: {test_file}")
        return {
            'file': test_file,
            'status': 'NOT_FOUND',
            'returncode': None,
            'stdout': '',
            'stderr': ''
        }

    try:
        result = subprocess.run(
            [sys.executable, '-m', 'pytest', str(test_path), '-v', '--tb=short'],
            capture_output=True,
            text=True,
            timeout=timeout
        )

        # pytest exits 0 only when all collected tests passed.
        status = 'PASS' if result.returncode == 0 else 'FAIL'

        print(f"\n[RESULT] {status} (exit code: {result.returncode})")

        # Print only the summary-relevant lines to keep console output short.
        if result.stdout:
            lines = result.stdout.split('\n')
            for line in lines:
                if any(x in line for x in ['PASSED', 'FAILED', 'ERROR', 'test_', '::']):
                    print(line)

        if result.stderr and result.returncode != 0:
            print("\n[STDERR]")
            print(result.stderr[:2000])  # Limit output

        return {
            'file': test_file,
            'status': status,
            'returncode': result.returncode,
            'stdout': result.stdout,
            'stderr': result.stderr
        }

    except subprocess.TimeoutExpired:
        print(f"[TIMEOUT] Test exceeded {timeout} seconds")
        return {
            'file': test_file,
            'status': 'TIMEOUT',
            'returncode': -1,
            'stdout': '',
            'stderr': 'Test timeout'
        }
    except Exception as e:
        # Deliberate catch-all: one broken test file must not abort the
        # whole suite run; the failure is reported in the summary instead.
        print(f"[ERROR] {e}")
        return {
            'file': test_file,
            'status': 'ERROR',
            'returncode': -1,
            'stdout': '',
            'stderr': str(e)
        }
def main():
    """Run all tests in TEST_FILES and generate a console + file report.

    Returns
    -------
    bool
        True when no test failed, errored, or timed out (missing files are
        counted as skipped and do not affect the result).
    """
    print("="*80)
    print("NAUTILUS-DOLPHIN TEST SUITE")
    print(f"Started: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("="*80)

    results = []

    for test_file in TEST_FILES:
        result = run_test(test_file)
        results.append(result)

    # Generate summary report
    print("\n" + "="*80)
    print("TEST SUMMARY REPORT")
    print("="*80)

    passed = sum(1 for r in results if r['status'] == 'PASS')
    failed = sum(1 for r in results if r['status'] == 'FAIL')
    skipped = sum(1 for r in results if r['status'] == 'NOT_FOUND')
    errors = sum(1 for r in results if r['status'] in ['ERROR', 'TIMEOUT'])

    print(f"\nTotal Tests: {len(results)}")
    print(f"  PASSED:  {passed}")
    print(f"  FAILED:  {failed}")
    print(f"  ERRORS:  {errors}")
    print(f"  SKIPPED: {skipped}")

    print("\n" + "-"*80)
    print("DETAILED RESULTS")
    print("-"*80)

    for r in results:
        status_symbol = {
            'PASS': '[PASS]',
            'FAIL': '[FAIL]',
            'NOT_FOUND': '[SKIP]',
            'ERROR': '[ERR]',
            'TIMEOUT': '[TIME]'
        }.get(r['status'], '[???]')

        print(f"{status_symbol} {r['file']:<50} {r['status']}")

    # List failures for fixing
    if failed > 0 or errors > 0:
        print("\n" + "="*80)
        print("TESTS NEEDING FIXES")
        print("="*80)

        for r in results:
            if r['status'] in ['FAIL', 'ERROR', 'TIMEOUT']:
                print(f"\n{r['file']}:")
                print(f"  Status: {r['status']}")
                if r['stderr']:
                    # Extract key error info from the first lines of stderr.
                    stderr_lines = r['stderr'].split('\n')
                    for line in stderr_lines[:20]:
                        if any(x in line for x in ['Error', 'error', 'ImportError', 'ModuleNotFoundError', 'AssertionError']):
                            print(f"  Error: {line.strip()}")

    # Save report to file next to this script. Explicit UTF-8 so pytest
    # output excerpts with non-ASCII characters never crash the report write.
    report_file = Path(__file__).parent / 'test_report.txt'
    with open(report_file, 'w', encoding='utf-8') as f:
        f.write("NAUTILUS-DOLPHIN TEST REPORT\n")
        f.write(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write("="*80 + "\n\n")

        f.write(f"Total Tests: {len(results)}\n")
        f.write(f"  PASSED:  {passed}\n")
        f.write(f"  FAILED:  {failed}\n")
        f.write(f"  ERRORS:  {errors}\n")
        f.write(f"  SKIPPED: {skipped}\n\n")

        f.write("DETAILED RESULTS:\n")
        f.write("-"*80 + "\n")

        for r in results:
            f.write(f"[{r['status']}] {r['file']}\n")
            if r['status'] != 'PASS' and r['stderr']:
                f.write(f"  Error excerpt: {r['stderr'][:500]}\n")
            f.write("\n")

    print(f"\n[Report saved to: {report_file}]")

    return failed == 0 and errors == 0
if __name__ == '__main__':
    # Exit code 0 only when every test passed — lets CI gate on this script.
    success = main()
    sys.exit(0 if success else 1)
Reference in New Issue
Block a user