initial: import DOLPHIN baseline 2026-04-21 from dolphinng5_predict working tree

Includes core prod + GREEN/BLUE subsystems:
- prod/ (BLUE harness, configs, scripts, docs)
- nautilus_dolphin/ (GREEN Nautilus-native impl + dvae/ preserved)
- adaptive_exit/ (AEM engine + models/bucket_assignments.pkl)
- Observability/ (EsoF advisor, TUI, dashboards)
- external_factors/ (EsoF producer)
- mc_forewarning_qlabs_fork/ (MC regime/envelope)

Excludes runtime caches, logs, backups, and reproducible artifacts per .gitignore.
Author: hjnormey
Date:   2026-04-21 16:58:38 +02:00
Commit: 01c19662cb

643 changed files with 260,241 additions and 0 deletions

run_mc_envelope.py (new file, 370 lines):

"""
Monte Carlo Envelope Mapper CLI
===============================
Command-line interface for running Monte Carlo envelope mapping
of the Nautilus-Dolphin trading system.
Usage:
python run_mc_envelope.py --mode run --stage 1 --n-samples 500
python run_mc_envelope.py --mode train --output-dir mc_results/
python run_mc_envelope.py --mode assess --assess my_config.json
Reference: MONTE_CARLO_SYSTEM_ENVELOPE_SPEC.md Section 11
"""
import argparse
import json
import sys
from pathlib import Path
# Add parent to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))


def create_parser() -> argparse.ArgumentParser:
"""Create argument parser."""
parser = argparse.ArgumentParser(
description="Monte Carlo System Envelope Mapper for DOLPHIN NG",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
# Run full envelope mapping
python run_mc_envelope.py --mode run --n-samples 500 --n-workers 7
# Train ML models on completed results
python run_mc_envelope.py --mode train
# Assess a configuration file
python run_mc_envelope.py --mode assess --assess config.json
# Generate summary report
python run_mc_envelope.py --mode report
"""
)
parser.add_argument(
'--mode',
choices=['sample', 'validate', 'run', 'train', 'assess', 'report'],
default='run',
help='Operation mode (default: run)'
)
parser.add_argument(
'--n-samples',
type=int,
default=500,
help='Samples per switch vector (default: 500)'
)
parser.add_argument(
'--n-workers',
type=int,
default=-1,
help='Parallel workers (-1 for auto, default: auto)'
)
parser.add_argument(
'--batch-size',
type=int,
default=1000,
help='Trials per batch file (default: 1000)'
)
parser.add_argument(
'--output-dir',
type=str,
default='mc_results',
help='Results directory (default: mc_results/)'
)
parser.add_argument(
'--stage',
type=int,
choices=[1, 2],
default=1,
help='Stage: 1=reduced, 2=full (default: 1)'
)
parser.add_argument(
'--seed',
type=int,
default=42,
help='Master RNG seed (default: 42)'
)
parser.add_argument(
'--config',
type=str,
help='JSON config file for parameter overrides'
)
parser.add_argument(
'--resume',
action='store_true',
help='Resume from existing results'
)
parser.add_argument(
'--assess',
type=str,
help='JSON file with config to assess (for mode=assess)'
)
parser.add_argument(
'--max-trials',
type=int,
help='Maximum total trials (for testing)'
)
parser.add_argument(
'--quiet',
action='store_true',
help='Reduce output verbosity'
)
return parser
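
# ---------------------------------------------------------------------------
# Command implementations. Each cmd_* function returns a process exit code
# (0 on success, non-zero on failure) that main() hands to sys.exit().
# ---------------------------------------------------------------------------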
def cmd_sample(args):
"""Sample configurations only."""
from mc import MCSampler
print("="*70)
print("MONTE CARLO CONFIGURATION SAMPLER")
print("="*70)
sampler = MCSampler(base_seed=args.seed)
print(f"\nGenerating trials (n_samples_per_switch={args.n_samples})...")
trials = sampler.generate_trials(
n_samples_per_switch=args.n_samples,
max_trials=args.max_trials
)
# Save
output_path = Path(args.output_dir) / "manifests" / "all_configs.json"
sampler.save_trials(trials, output_path)
print(f"\n[OK] Generated and saved {len(trials)} configurations")
return 0
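
# cmd_validate applies the V1-V4 gates reported below (range, constraint,
# cross-group, and degeneracy checks); the per-gate logic lives in
# mc.MCValidator. Requires the configs written by --mode sample.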
def cmd_validate(args):
"""Validate configurations."""
from mc import MCSampler, MCValidator
print("="*70)
print("MONTE CARLO CONFIGURATION VALIDATOR")
print("="*70)
# Load configurations
config_path = Path(args.output_dir) / "manifests" / "all_configs.json"
if not config_path.exists():
print(f"[ERROR] Configurations not found: {config_path}")
print("Run with --mode sample first")
return 1
sampler = MCSampler()
trials = sampler.load_trials(config_path)
print(f"\nValidating {len(trials)} configurations...")
validator = MCValidator(verbose=not args.quiet)
results = validator.validate_batch(trials)
# Stats
stats = validator.get_validity_stats(results)
print(f"\n{'='*70}")
print("VALIDATION RESULTS")
print(f"{'='*70}")
print(f"Total: {stats['total']}")
print(f"Valid: {stats['valid']} ({stats['validity_rate']*100:.1f}%)")
print(f"Rejected V1 (range): {stats.get('rejected_v1', 0)}")
print(f"Rejected V2 (constraints): {stats.get('rejected_v2', 0)}")
print(f"Rejected V3 (cross-group): {stats.get('rejected_v3', 0)}")
print(f"Rejected V4 (degenerate): {stats.get('rejected_v4', 0)}")
# Save validation results
output_path = Path(args.output_dir) / "manifests" / "validation_results.json"
with open(output_path, 'w') as f:
json.dump([r.to_dict() for r in results], f, indent=2)
print(f"\n[OK] Validation results saved: {output_path}")
return 0
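
# cmd_run drives the full envelope mapping. run_envelope_mapping() takes the
# sampling parameters directly, so this mode does not appear to need a prior
# --mode sample pass (unlike --mode validate above).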
def cmd_run(args):
"""Run full envelope mapping."""
from mc import MCRunner
print("="*70)
print("MONTE CARLO ENVELOPE MAPPER")
print("="*70)
print(f"Mode: {'Stage 1 (reduced)' if args.stage == 1 else 'Stage 2 (full)'}")
print(f"Samples per switch: {args.n_samples}")
print(f"Workers: {args.n_workers if args.n_workers > 0 else 'auto'}")
print(f"Output: {args.output_dir}")
print(f"Seed: {args.seed}")
print(f"Resume: {args.resume}")
print("="*70)
runner = MCRunner(
output_dir=args.output_dir,
n_workers=args.n_workers,
batch_size=args.batch_size,
base_seed=args.seed,
verbose=not args.quiet
)
stats = runner.run_envelope_mapping(
n_samples_per_switch=args.n_samples,
max_trials=args.max_trials,
resume=args.resume
)
# Save stats
stats_path = Path(args.output_dir) / "run_stats.json"
with open(stats_path, 'w') as f:
json.dump(stats, f, indent=2, default=str)
print(f"\n[OK] Run complete. Stats saved: {stats_path}")
return 0
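
# cmd_train fits ML models on the completed run results under --output-dir;
# presumably these are the predictors that --mode assess consumes below
# (ROI / max-DD / champion / catastrophic estimates).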
def cmd_train(args):
"""Train ML models."""
from mc import MCML
print("="*70)
print("MONTE CARLO ML TRAINER")
print("="*70)
ml = MCML(output_dir=args.output_dir)
try:
        ml.train_all_models()
print("\n[OK] Training complete")
return 0
except Exception as e:
print(f"\n[ERROR] Training failed: {e}")
import traceback
traceback.print_exc()
return 1
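
# cmd_assess loads a trial config JSON and scores it with the trained models.
# A flat config might look like this (illustrative values only; the full
# field set is whatever mc.MCTrialConfig defines):
#   {"vel_div_threshold": 0.15, "max_leverage": 5.0, "fraction": 0.25}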
def cmd_assess(args):
"""Assess a configuration."""
from mc import DolphinForewarner, MCTrialConfig
if not args.assess:
print("[ERROR] --assess flag required with path to config JSON")
return 1
config_path = Path(args.assess)
if not config_path.exists():
print(f"[ERROR] Config file not found: {config_path}")
return 1
print("="*70)
print("DOLPHIN FOREWARNING ASSESSMENT")
print("="*70)
# Load config
with open(config_path, 'r') as f:
config_dict = json.load(f)
# Create forewarner
forewarner = DolphinForewarner(models_dir=f"{args.output_dir}/models")
# Assess
if 'trial_id' in config_dict:
config = MCTrialConfig.from_dict(config_dict)
else:
# Assume flat config
config = MCTrialConfig(**config_dict)
report = forewarner.assess(config)
    # Print report
    print("\nConfiguration:")
    print(f"  vel_div_threshold: {config.vel_div_threshold}")
    print(f"  max_leverage: {config.max_leverage}")
    print(f"  fraction: {config.fraction}")
    print("\nPredictions:")
    print(f"  ROI: {report.predicted_roi:.2f}%")
    print(f"  Max DD: {report.predicted_max_dd:.2f}%")
    print(f"  Champion probability: {report.champion_probability:.1%}")
    print(f"  Catastrophic probability: {report.catastrophic_probability:.1%}")
    print(f"  Envelope score: {report.envelope_score:.2f}")
    print("\nWarnings:")
if report.warnings:
for w in report.warnings:
print(f" ! {w}")
else:
print(" (none)")
# Save report
report_path = Path(args.output_dir) / "forewarning_report.json"
with open(report_path, 'w') as f:
json.dump(report.to_dict(), f, indent=2, default=str)
print(f"\n[OK] Report saved: {report_path}")
return 0
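
# cmd_report renders a Markdown summary of an existing run to
# <output-dir>/envelope_report.md and echoes it to stdout.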
def cmd_report(args):
"""Generate summary report."""
from mc import MCRunner
print("="*70)
print("MONTE CARLO REPORT GENERATOR")
print("="*70)
runner = MCRunner(output_dir=args.output_dir)
report = runner.generate_report(
output_path=f"{args.output_dir}/envelope_report.md"
)
print(report)
return 0
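
# ---------------------------------------------------------------------------
# Entry point. Exit codes: 0 success, 1 error, 130 on Ctrl-C (the
# conventional 128 + SIGINT).
# ---------------------------------------------------------------------------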
def main():
"""Main entry point."""
parser = create_parser()
args = parser.parse_args()
# Dispatch
try:
if args.mode == 'sample':
return cmd_sample(args)
elif args.mode == 'validate':
return cmd_validate(args)
elif args.mode == 'run':
return cmd_run(args)
elif args.mode == 'train':
return cmd_train(args)
elif args.mode == 'assess':
return cmd_assess(args)
elif args.mode == 'report':
return cmd_report(args)
        else:
            # Unreachable in practice: argparse restricts --mode to the
            # choices above; kept as a defensive guard.
            print(f"[ERROR] Unknown mode: {args.mode}")
            return 1
except KeyboardInterrupt:
print("\n\n[INTERRUPTED] Stopping...")
return 130
except Exception as e:
print(f"\n[ERROR] {e}")
import traceback
traceback.print_exc()
return 1


if __name__ == "__main__":
sys.exit(main())