feat(config/scripts): GREEN config wiring + S6 recompute tooling
prod/configs/green.yml: - asset_bucket_ban_set: [4] (B4 banned at selector level) - s6_size_table: inline bootstrap multipliers (B0→0.4, B1→0.3, B3→2.0, B5→0.5, B6→1.5) matching CRITICAL_ASSET_PICKING S6 scenario - esof_sizing_table: FAV→1.2, MILD_POS→0.6, UNKNOWN→0.25, MILD_NEG→0.0, UNFAV→0.0 - use_int_leverage: true (1x fixed pending winrate analysis) - s6_table_path: pointer to generated YAML (recompute updates this) BLUE (blue.yml) carries none of these keys → BLUE math unchanged. prod/configs/green_s6_table.yml: bootstrap stub with frontmatter (generated_at, source_branch, n_trades). Regenerated by recompute script. prod/scripts/recompute_s6_coefficients.py: Queries trade_events, maps assets to KMeans buckets, derives per-bucket sizing mults. Variance guard: >20% net-PnL move flags bucket in dolphin.s6_recompute_log for manual review before promote. prod/s6_recompute_flow.py: Prefect flow wrapping the recompute script. Cadence via S6_RECOMPUTE_INTERVAL_DAYS env (default 30). Kill-switch: S6_RECOMPUTE_DISABLED=1. prod/scripts/analyze_leverage_winrate.py: Read-only walk of CH trade_events; bins trades by leverage_raw, emits per-bin WR/net-PnL/avg-MAE. Output informs the int-leverage rounding rule choice (Option 1 round-half-up vs Option 2 banker's round vs stay-at-1x). Does not auto-apply a rule change. Plan refs: Tasks 3, 8, 10. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
116
prod/s6_recompute_flow.py
Executable file
116
prod/s6_recompute_flow.py
Executable file
@@ -0,0 +1,116 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Prefect flow — periodically recompute S6 bucket sizing coefficients.
|
||||
|
||||
Cadence
|
||||
───────
|
||||
Controlled by env `S6_RECOMPUTE_INTERVAL_DAYS` (default 30). The flow is
|
||||
idempotent: running it more often than the interval just produces a fresh
|
||||
YAML and markdown report; nothing is auto-promoted.
|
||||
|
||||
Kill-switch
|
||||
───────────
|
||||
Set env `S6_RECOMPUTE_DISABLED=1` to skip — the flow logs and exits 0.
|
||||
|
||||
Wiring into supervisord/cron
|
||||
────────────────────────────
|
||||
Run this flow on the supervisord host. Recommended: daily timer that guards
|
||||
internally on the last-run timestamp in `dolphin.s6_recompute_log`, or a
|
||||
Prefect deployment with a cron schedule `0 3 * * *` and the interval env set.
|
||||
|
||||
This flow shells out to `prod/scripts/recompute_s6_coefficients.py` rather
|
||||
than importing it so that the script remains independently runnable without
|
||||
requiring Prefect — honours the plan's "recompute works whether or not
|
||||
Prefect is installed" requirement.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
|
||||
try:
    from prefect import flow, task, get_run_logger
except ImportError:  # pragma: no cover — allow import-only smoke tests
    # Prefect is optional: when it is absent, @flow/@task become no-op
    # decorators so the module still imports and runs as a plain script.
    def _passthrough(fn=None, **_kw):
        # Supports both bare (@task) and parameterized (@flow(name=...)) use.
        return (lambda f: f) if fn is None else fn

    flow = _passthrough
    task = _passthrough

    def get_run_logger():  # pragma: no cover
        import logging

        return logging.getLogger("s6_recompute_flow")
|
||||
|
||||
|
||||
# All paths are anchored on the repository root, two directories above this file.
REPO_ROOT = Path(__file__).resolve().parent.parent
RECOMPUTE_PY = REPO_ROOT.joinpath("prod", "scripts", "recompute_s6_coefficients.py")
TABLE_YML = REPO_ROOT.joinpath("prod", "configs", "green_s6_table.yml")
|
||||
|
||||
|
||||
@task
def check_interval_elapsed(interval_days: int) -> bool:
    """Return True if the coefficient YAML is missing OR older than the interval.

    Reads ``meta.generated_at`` from the table YAML. Any trouble — missing
    PyYAML, malformed document, unparseable timestamp — errs on the side of
    recomputing, matching the original behavior.
    """
    if not TABLE_YML.exists():
        return True
    try:
        import yaml  # PyYAML — imported lazily so the module loads without it
        from datetime import timezone

        doc = yaml.safe_load(TABLE_YML.read_text()) or {}
        ts_str = (doc.get("meta") or {}).get("generated_at", "")
        if not ts_str:
            return True
        # Accept both a trailing "Z" and an explicit offset. The previous
        # rstrip("Z") produced a naive datetime, and subtracting an
        # offset-aware stamp ("+00:00") from naive utcnow() raised TypeError,
        # silently forcing a recompute via the broad except below.
        ts = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
        if ts.tzinfo is None:
            # Naive stamps are treated as UTC — same assumption the original
            # utcnow() comparison made.
            ts = ts.replace(tzinfo=timezone.utc)
        # datetime.utcnow() is deprecated (3.12+); compare aware-to-aware.
        return datetime.now(timezone.utc) - ts >= timedelta(days=interval_days)
    except Exception:
        # Any parsing trouble → safer to recompute
        return True
|
||||
|
||||
|
||||
@task
def run_recompute(strategy: str, since: str, min_trades: int, source_branch: str) -> int:
    """Invoke the recompute script as a subprocess and return its exit code.

    Deliberately shells out instead of importing the script so the script
    stays runnable on hosts without Prefect installed.
    """
    logger = get_run_logger()
    flags = {
        "--strategy": strategy,
        "--since": since,
        "--min-trades-per-bucket": str(min_trades),
        "--source-branch": source_branch,
    }
    cmd = [sys.executable, str(RECOMPUTE_PY)]
    for flag, value in flags.items():
        cmd.extend((flag, value))
    logger.info(f"[s6_recompute_flow] exec: {' '.join(cmd)}")
    rc = subprocess.call(cmd, cwd=str(REPO_ROOT))
    logger.info(f"[s6_recompute_flow] exit code: {rc}")
    return rc
|
||||
|
||||
|
||||
@flow(name="s6-recompute")
def s6_recompute_flow(
    strategy: str = "blue",
    since: str = "2026-01-01",
    min_trades: int = 20,
    source_branch: str = "exp/green-s6-esof-aem-shadow-2026-04-21",
):
    """Recompute S6 sizing coefficients when due.

    Skips (returning 0) if the kill-switch env is set or the configured
    interval has not elapsed; otherwise returns the recompute script's
    exit code.
    """
    log = get_run_logger()

    # Operator kill-switch: exit quietly with a success code.
    if os.environ.get("S6_RECOMPUTE_DISABLED") == "1":
        log.info("[s6_recompute_flow] disabled via S6_RECOMPUTE_DISABLED=1 — skipping")
        return 0

    raw_interval = os.environ.get("S6_RECOMPUTE_INTERVAL_DAYS", "30")
    try:
        interval_days = int(raw_interval)
    except ValueError:
        # Malformed env value → fall back to the 30-day default.
        interval_days = 30

    if not check_interval_elapsed(interval_days):
        log.info(f"[s6_recompute_flow] interval not elapsed ({interval_days}d) — skipping")
        return 0

    return run_recompute(strategy, since, min_trades, source_branch)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: mirror the flow's return code to the shell
    # (None from a skip coerces to 0).
    exit_code = s6_recompute_flow() or 0
    sys.exit(int(exit_code))
|
||||
Reference in New Issue
Block a user