initial: import DOLPHIN baseline 2026-04-21 from dolphinng5_predict working tree
Includes core prod + GREEN/BLUE subsystems:
- prod/ (BLUE harness, configs, scripts, docs)
- nautilus_dolphin/ (GREEN Nautilus-native impl + dvae/ preserved)
- adaptive_exit/ (AEM engine + models/bucket_assignments.pkl)
- Observability/ (EsoF advisor, TUI, dashboards)
- external_factors/ (EsoF producer)
- mc_forewarning_qlabs_fork/ (MC regime/envelope)

Excludes runtime caches, logs, backups, and reproducible artifacts per .gitignore.
This commit is contained in:
108
nautilus_dolphin/test_forewarner_gamut.py
Executable file
108
nautilus_dolphin/test_forewarner_gamut.py
Executable file
@@ -0,0 +1,108 @@
|
||||
import os
|
||||
import sys
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from collections import Counter
|
||||
from pathlib import Path
|
||||
|
||||
# Adjust paths
|
||||
PROJECT_ROOT = Path(__file__).resolve().parent
|
||||
sys.path.insert(0, str(PROJECT_ROOT))
|
||||
|
||||
from mc.mc_ml import DolphinForewarner
|
||||
from mc.mc_sampler import MCSampler
|
||||
|
||||
MODELS_DIR = PROJECT_ROOT / "mc_results" / "models"
|
||||
|
||||
def determine_risk_level(report):
    """Map a forewarner assessment report to a discrete alert level.

    Parameters
    ----------
    report : object
        Forewarner report exposing ``envelope_score``,
        ``catastrophic_probability`` and ``champion_probability``.

    Returns
    -------
    str
        One of ``"RED"``, ``"ORANGE"``, ``"GREEN"`` or ``"AMBER"``.
    """
    env = report.envelope_score
    cat = report.catastrophic_probability
    champ = report.champion_probability

    if cat > 0.25 or env < -1.0:
        return "RED"
    elif env < 0 or cat > 0.10:
        return "ORANGE"
    # BUGFIX: GREEN must be tested before AMBER. The original checked the
    # weaker AMBER condition (env > 0 and champ > 0.4) first, which strictly
    # contains the GREEN condition (env > 0.5 and champ > 0.6), so GREEN was
    # unreachable and every safe/optimal config was misreported as AMBER.
    elif env > 0.5 and champ > 0.6:
        return "GREEN"
    else:
        # Transitional zone: non-negative envelope without strong champion odds.
        return "AMBER"
|
||||
|
||||
def run_gamut_test():
    """Stress-test the DolphinForewarner across a broad random parameter gamut.

    Loads the trained ML models, samples up to 2,500 structurally-sound
    parameter vectors via MCSampler, assesses each through the forewarner,
    and prints the distribution of risk levels plus the worst (lowest
    envelope score) and best (highest envelope score) configurations found.
    Prints an error and returns early if models or trials are unavailable.
    """
    print("======================================================================")
    print("MONTE CARLO FOREWARNER GAMUT TEST ('Montecarlo the Montecarlo')")
    print("======================================================================")

    if not MODELS_DIR.exists():
        print(f"[ERROR] Models directory not found at {MODELS_DIR}.")
        return

    print("[1/3] Initializing ML Models...")
    try:
        forewarner = DolphinForewarner(models_dir=str(MODELS_DIR))
    except Exception as e:
        # Models are mandatory for every assessment; abort rather than crash mid-run.
        print(f"[ERROR] Failed to load ML models: {e}")
        return

    print("[2/3] Generating 2,500 random parameter configurations across the manifold...")
    sampler = MCSampler(base_seed=1337)
    # Generate a large set of raw trials (they don't even need to be strictly 'valid'
    # for the ML model to assess them, but we'll use the sampler to get structurally sound vectors)
    trials = sampler.generate_trials(n_samples_per_switch=10, max_trials=2500)
    print(f" Generated {len(trials)} unique parameter vectors.")

    # BUGFIX: guard against an empty trial set, which would otherwise cause a
    # ZeroDivisionError in the percentage report and an IndexError on the
    # extremum lookups below.
    if not trials:
        print("[ERROR] Sampler produced no trials; nothing to assess.")
        return

    print("[3/3] Feeding gamut into the Forewarning Alert System...")

    results = []
    level_counts = Counter()

    for i, trial in enumerate(trials):
        config_dict = trial.to_dict()
        report = forewarner.assess_config_dict(config_dict)
        level = determine_risk_level(report)

        level_counts[level] += 1
        results.append({
            'trial_id': i,
            'level': level,
            'env_score': report.envelope_score,
            'cat_prob': report.catastrophic_probability,
            'champ_prob': report.champion_probability,
            'predicted_roi': report.predicted_roi,
            'config': config_dict
        })

        # Lightweight progress indicator every 500 assessments.
        if (i + 1) % 500 == 0:
            print(f" Assessed {i + 1} / {len(trials)} configs...")

    print("\n======================================================================")
    print("FOREWARNER RESPONSE DISTRIBUTION")
    print("======================================================================")

    total = len(trials)
    print(f"GREEN (Safe / Optimal): {level_counts['GREEN']:>4} ({level_counts['GREEN']/total*100:.1f}%)")
    print(f"AMBER (Transitional): {level_counts['AMBER']:>4} ({level_counts['AMBER']/total*100:.1f}%)")
    print(f"ORANGE (High Risk Warning): {level_counts['ORANGE']:>4} ({level_counts['ORANGE']/total*100:.1f}%)")
    print(f"RED (Catastrophic Alert): {level_counts['RED']:>4} ({level_counts['RED']/total*100:.1f}%)")

    print("\n======================================================================")
    print("SAMPLE EXTREMUM PROFILES")
    print("======================================================================")

    # Sort ascending by envelope score: worst config first, best config last.
    results_sorted = sorted(results, key=lambda x: x['env_score'])

    # BUGFIX: was "\\n", which printed a literal backslash-n instead of a blank
    # line (every other banner in this function uses a real "\n").
    print("\n--- WORST 'RED' CONFIGURATION DETECTED ---")
    worst = results_sorted[0]
    print(f"Envelope Score: {worst['env_score']:.3f} | Catastrophic Prob: {worst['cat_prob']:.1%} | Predicted ROI: {worst['predicted_roi']:.1f}%")
    print(f"Key Triggers: max_leverage={worst['config']['max_leverage']:.2f}, fraction={worst['config']['fraction']:.2f}, vel_div_extreme={worst['config']['vel_div_extreme']:.4f}")

    print("\n--- BEST 'GREEN' CONFIGURATION DETECTED ---")
    best = results_sorted[-1]
    print(f"Envelope Score: {best['env_score']:.3f} | Champion Prob: {best['champ_prob']:.1%} | Predicted ROI: {best['predicted_roi']:.1f}%")
    print(f"Key Triggers: max_leverage={best['config']['max_leverage']:.2f}, fraction={best['config']['fraction']:.2f}, vel_div_extreme={best['config']['vel_div_extreme']:.4f}")
||||
# Entry-point guard: run the full gamut test only when executed as a script.
if __name__ == "__main__":
    run_gamut_test()
|
||||
Reference in New Issue
Block a user