Loading source
Pulling the file list, source metadata, and syntax-aware rendering for this listing.
Source from repo
Create, test, and iteratively improve Claude skills with eval benchmarks and description optimization
Files
Skill
Size
Entrypoint
Format
Open file
Syntax-highlighted preview of this file as included in the skill package.
scripts/run_loop.py
#!/usr/bin/env python3
"""Run the eval + improve loop until all pass or max iterations reached.

Combines run_eval.py and improve_description.py in a loop, tracking history
and returning the best description found. Supports train/test split to prevent
overfitting.
"""

import argparse
import json
import random
import sys
import tempfile
import time
import webbrowser
from pathlib import Path

from scripts.generate_report import generate_html
from scripts.improve_description import improve_description
from scripts.run_eval import find_project_root, run_eval
from scripts.utils import parse_skill_md


def split_eval_set(eval_set: list[dict], holdout: float, seed: int = 42) -> tuple[list[dict], list[dict]]:
    """Split eval set into train and test sets, stratified by should_trigger.

    Args:
        eval_set: Eval entries; each must carry a boolean "should_trigger" key.
        holdout: Fraction of each stratum to hold out for the test set.
        seed: RNG seed so the split is reproducible across runs.

    Returns:
        (train_set, test_set). Every non-empty stratum contributes at least
        one entry to the test set, even when ``holdout`` rounds down to zero.
    """
    # Use a private RNG instead of reseeding the module-global one
    # (random.seed) so the split does not perturb random-number state for the
    # rest of the process. random.Random(seed).shuffle produces the identical
    # sequence to random.seed(seed) + random.shuffle, so splits are unchanged.
    rng = random.Random(seed)

    # Stratify: positives (should trigger) vs negatives (should not).
    trigger = [e for e in eval_set if e["should_trigger"]]
    no_trigger = [e for e in eval_set if not e["should_trigger"]]

    # Shuffle each stratum independently so the holdout slice is random.
    rng.shuffle(trigger)
    rng.shuffle(no_trigger)

    # max(1, ...) guarantees at least one test example per non-empty stratum;
    # slicing an empty list is harmless when a stratum is empty.
    n_trigger_test = max(1, int(len(trigger) * holdout))
    n_no_trigger_test = max(1, int(len(no_trigger) * holdout))

    test_set = trigger[:n_trigger_test] + no_trigger[:n_no_trigger_test]
    train_set = trigger[n_trigger_test:] + no_trigger[n_no_trigger_test:]

    return train_set, test_set


def run_loop(
    eval_set: list[dict],
    skill_path: Path,
    description_override: str | None,
    num_workers: int,
    timeout: int,
    max_iterations: int,
    runs_per_query: int,
    trigger_threshold: float,
    holdout: float,
    model: str,
    verbose: bool,
    live_report_path: Path | None = None,
    log_dir: Path | None = None,
) -> dict:
    """Run the eval + improvement loop.

    Each iteration evaluates the current description against the full eval
    set, records the result in ``history``, and (unless the train set fully
    passes or max_iterations is reached) asks the improvement model for a new
    description. The best iteration is chosen by TEST score when a holdout
    split is in effect, otherwise by TRAIN score.

    Args:
        eval_set: Eval entries with "query" and "should_trigger" keys.
        skill_path: Directory containing the skill's SKILL.md.
        description_override: Starting description; falls back to the one
            parsed from SKILL.md when None.
        num_workers: Parallel workers passed through to run_eval.
        timeout: Per-query timeout in seconds, passed through to run_eval.
        max_iterations: Maximum improvement iterations.
        runs_per_query: Trigger-rate sample size per query.
        trigger_threshold: Trigger rate needed to count a query as triggered.
        holdout: Fraction held out for the test split (0 disables the split).
        model: Model name used for the improvement step.
        verbose: When True, print progress to stderr.
        live_report_path: If given, an auto-refreshing HTML report is written
            here after every iteration.
        log_dir: If given, passed to improve_description for logging.

    Returns:
        A dict with the best/final descriptions, scores, split sizes, exit
        reason, and the full per-iteration ``history``.
    """
    project_root = find_project_root()
    name, original_description, content = parse_skill_md(skill_path)
    current_description = description_override or original_description

    # Split into train/test if holdout > 0; otherwise everything is train.
    if holdout > 0:
        train_set, test_set = split_eval_set(eval_set, holdout)
        if verbose:
            print(f"Split: {len(train_set)} train, {len(test_set)} test (holdout={holdout})", file=sys.stderr)
    else:
        train_set = eval_set
        test_set = []

    history: list[dict] = []
    exit_reason = "unknown"

    for iteration in range(1, max_iterations + 1):
        if verbose:
            print(f"\n{'='*60}", file=sys.stderr)
            print(f"Iteration {iteration}/{max_iterations}", file=sys.stderr)
            print(f"Description: {current_description}", file=sys.stderr)
            print(f"{'='*60}", file=sys.stderr)

        # Evaluate train + test together in one batch for parallelism.
        all_queries = train_set + test_set
        t0 = time.time()
        all_results = run_eval(
            eval_set=all_queries,
            skill_name=name,
            description=current_description,
            num_workers=num_workers,
            timeout=timeout,
            project_root=project_root,
            runs_per_query=runs_per_query,
            trigger_threshold=trigger_threshold,
            model=model,
        )
        eval_elapsed = time.time() - t0

        # Split results back into train/test by matching query text.
        # NOTE(review): this assumes query strings are unique across the eval
        # set — a duplicate query appearing in both splits would have its
        # result attributed to train. Verify eval sets are deduplicated.
        train_queries_set = {q["query"] for q in train_set}
        train_result_list = [r for r in all_results["results"] if r["query"] in train_queries_set]
        test_result_list = [r for r in all_results["results"] if r["query"] not in train_queries_set]

        train_passed = sum(1 for r in train_result_list if r["pass"])
        train_total = len(train_result_list)
        train_summary = {"passed": train_passed, "failed": train_total - train_passed, "total": train_total}
        train_results = {"results": train_result_list, "summary": train_summary}

        if test_set:
            test_passed = sum(1 for r in test_result_list if r["pass"])
            test_total = len(test_result_list)
            test_summary = {"passed": test_passed, "failed": test_total - test_passed, "total": test_total}
            test_results = {"results": test_result_list, "summary": test_summary}
        else:
            test_results = None
            test_summary = None

        history.append({
            "iteration": iteration,
            "description": current_description,
            "train_passed": train_summary["passed"],
            "train_failed": train_summary["failed"],
            "train_total": train_summary["total"],
            "train_results": train_results["results"],
            "test_passed": test_summary["passed"] if test_summary else None,
            "test_failed": test_summary["failed"] if test_summary else None,
            "test_total": test_summary["total"] if test_summary else None,
            "test_results": test_results["results"] if test_results else None,
            # For backward compat with report generator
            "passed": train_summary["passed"],
            "failed": train_summary["failed"],
            "total": train_summary["total"],
            "results": train_results["results"],
        })

        # Write live report if path provided (auto-refresh so the open
        # browser tab tracks progress).
        if live_report_path:
            partial_output = {
                "original_description": original_description,
                "best_description": current_description,
                "best_score": "in progress",
                "iterations_run": len(history),
                "holdout": holdout,
                "train_size": len(train_set),
                "test_size": len(test_set),
                "history": history,
            }
            live_report_path.write_text(generate_html(partial_output, auto_refresh=True, skill_name=name))

        if verbose:
            def print_eval_stats(label, results, elapsed):
                # Confusion-matrix stats over individual runs (not queries):
                # each query contributes r["runs"] samples, r["triggers"] of
                # which triggered the skill.
                pos = [r for r in results if r["should_trigger"]]
                neg = [r for r in results if not r["should_trigger"]]
                tp = sum(r["triggers"] for r in pos)
                pos_runs = sum(r["runs"] for r in pos)
                fn = pos_runs - tp
                fp = sum(r["triggers"] for r in neg)
                neg_runs = sum(r["runs"] for r in neg)
                tn = neg_runs - fp
                total = tp + tn + fp + fn
                # Degenerate cases default to perfect precision/recall so an
                # empty stratum doesn't read as a failure.
                precision = tp / (tp + fp) if (tp + fp) > 0 else 1.0
                recall = tp / (tp + fn) if (tp + fn) > 0 else 1.0
                accuracy = (tp + tn) / total if total > 0 else 0.0
                print(f"{label}: {tp+tn}/{total} correct, precision={precision:.0%} recall={recall:.0%} accuracy={accuracy:.0%} ({elapsed:.1f}s)", file=sys.stderr)
                for r in results:
                    status = "PASS" if r["pass"] else "FAIL"
                    rate_str = f"{r['triggers']}/{r['runs']}"
                    print(f"  [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:60]}", file=sys.stderr)

            print_eval_stats("Train", train_results["results"], eval_elapsed)
            if test_summary:
                # Test ran in the same batch, so no separate elapsed time.
                print_eval_stats("Test ", test_results["results"], 0)

        if train_summary["failed"] == 0:
            exit_reason = f"all_passed (iteration {iteration})"
            if verbose:
                print(f"\nAll train queries passed on iteration {iteration}!", file=sys.stderr)
            break

        if iteration == max_iterations:
            exit_reason = f"max_iterations ({max_iterations})"
            if verbose:
                print(f"\nMax iterations reached ({max_iterations}).", file=sys.stderr)
            break

        # Improve the description based on train results only.
        if verbose:
            print("\nImproving description...", file=sys.stderr)

        t0 = time.time()
        # Strip test scores from history so improvement model can't see them
        # (keeps the holdout honest).
        blinded_history = [
            {k: v for k, v in h.items() if not k.startswith("test_")}
            for h in history
        ]
        new_description = improve_description(
            skill_name=name,
            skill_content=content,
            current_description=current_description,
            eval_results=train_results,
            history=blinded_history,
            model=model,
            log_dir=log_dir,
            iteration=iteration,
        )
        improve_elapsed = time.time() - t0

        if verbose:
            print(f"Proposed ({improve_elapsed:.1f}s): {new_description}", file=sys.stderr)

        current_description = new_description

    # Find the best iteration by TEST score (or train score if no test set).
    if test_set:
        # "or 0" guards against None test_passed (shouldn't occur when
        # test_set is non-empty, but keeps max() total).
        best = max(history, key=lambda h: h["test_passed"] or 0)
        best_score = f"{best['test_passed']}/{best['test_total']}"
    else:
        best = max(history, key=lambda h: h["train_passed"])
        best_score = f"{best['train_passed']}/{best['train_total']}"

    if verbose:
        print(f"\nExit reason: {exit_reason}", file=sys.stderr)
        print(f"Best score: {best_score} (iteration {best['iteration']})", file=sys.stderr)

    return {
        "exit_reason": exit_reason,
        "original_description": original_description,
        "best_description": best["description"],
        "best_score": best_score,
        "best_train_score": f"{best['train_passed']}/{best['train_total']}",
        "best_test_score": f"{best['test_passed']}/{best['test_total']}" if test_set else None,
        "final_description": current_description,
        "iterations_run": len(history),
        "holdout": holdout,
        "train_size": len(train_set),
        "test_size": len(test_set),
        "history": history,
    }


def main():
    """CLI entry point: parse args, run the loop, write reports/results."""
    parser = argparse.ArgumentParser(description="Run eval + improve loop")
    parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file")
    parser.add_argument("--skill-path", required=True, help="Path to skill directory")
    parser.add_argument("--description", default=None, help="Override starting description")
    parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers")
    parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds")
    parser.add_argument("--max-iterations", type=int, default=5, help="Max improvement iterations")
    parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query")
    parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold")
    parser.add_argument("--holdout", type=float, default=0.4, help="Fraction of eval set to hold out for testing (0 to disable)")
    parser.add_argument("--model", required=True, help="Model for improvement")
    parser.add_argument("--verbose", action="store_true", help="Print progress to stderr")
    parser.add_argument("--report", default="auto", help="Generate HTML report at this path (default: 'auto' for temp file, 'none' to disable)")
    parser.add_argument("--results-dir", default=None, help="Save all outputs (results.json, report.html, log.txt) to a timestamped subdirectory here")
    args = parser.parse_args()

    eval_set = json.loads(Path(args.eval_set).read_text())
    skill_path = Path(args.skill_path)

    if not (skill_path / "SKILL.md").exists():
        print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr)
        sys.exit(1)

    name, _, _ = parse_skill_md(skill_path)

    # Set up live report path
    if args.report != "none":
        if args.report == "auto":
            timestamp = time.strftime("%Y%m%d_%H%M%S")
            live_report_path = Path(tempfile.gettempdir()) / f"skill_description_report_{skill_path.name}_{timestamp}.html"
        else:
            live_report_path = Path(args.report)
        # Open the report immediately so the user can watch
        live_report_path.write_text("<html><body><h1>Starting optimization loop...</h1><meta http-equiv='refresh' content='5'></body></html>")
        webbrowser.open(str(live_report_path))
    else:
        live_report_path = None

    # Determine output directory (create before run_loop so logs can be written)
    if args.results_dir:
        timestamp = time.strftime("%Y-%m-%d_%H%M%S")
        results_dir = Path(args.results_dir) / timestamp
        results_dir.mkdir(parents=True, exist_ok=True)
    else:
        results_dir = None

    # NOTE(review): the "logs" subdirectory is not created here — presumably
    # improve_description creates it on first write; verify.
    log_dir = results_dir / "logs" if results_dir else None

    output = run_loop(
        eval_set=eval_set,
        skill_path=skill_path,
        description_override=args.description,
        num_workers=args.num_workers,
        timeout=args.timeout,
        max_iterations=args.max_iterations,
        runs_per_query=args.runs_per_query,
        trigger_threshold=args.trigger_threshold,
        holdout=args.holdout,
        model=args.model,
        verbose=args.verbose,
        live_report_path=live_report_path,
        log_dir=log_dir,
    )

    # Save JSON output (stdout is the machine-readable channel; progress
    # messages all go to stderr).
    json_output = json.dumps(output, indent=2)
    print(json_output)
    if results_dir:
        (results_dir / "results.json").write_text(json_output)

    # Write final HTML report (without auto-refresh)
    if live_report_path:
        live_report_path.write_text(generate_html(output, auto_refresh=False, skill_name=name))
        print(f"\nReport: {live_report_path}", file=sys.stderr)

    if results_dir and live_report_path:
        (results_dir / "report.html").write_text(generate_html(output, auto_refresh=False, skill_name=name))

    if results_dir:
        print(f"Results saved to: {results_dir}", file=sys.stderr)


if __name__ == "__main__":
    main()