#!/usr/bin/env python3
"""Run multiple trials of OpenEvolve to collect statistics."""

import argparse
import json
import os
import re
import shutil
import subprocess
import sys
from pathlib import Path

# Run from the example directory so the relative paths below
# (config.yaml, initial_program.py, evaluator.py) resolve.
os.chdir(Path(__file__).parent)


def run_trial(trial_num: int, max_iterations: int = 100, seed: int | None = None):
    """Run a single OpenEvolve trial."""
    output_dir = f"openevolve_output_trial_{trial_num}"

    # Start from a clean output directory
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)

    # Update the config with a new seed if one was provided
    if seed is not None:
        # Read config
        with open("config.yaml", "r") as f:
            config_content = f.read()

        # Replace the seed; this assumes config.yaml already contains a
        # `random_seed: <int>` entry (otherwise the file is written unchanged)
        config_content = re.sub(r'random_seed:\s*\d+', f'random_seed: {seed}', config_content)

        # Write a per-trial temp config
        temp_config = f"config_trial_{trial_num}.yaml"
        with open(temp_config, "w") as f:
            f.write(config_content)
    else:
        temp_config = "config.yaml"

    # Run OpenEvolve
    cmd = [
        "openevolve-run",
        "initial_program.py",
        "evaluator.py",
        "--config", temp_config,
        "--iterations", str(max_iterations),
        "--output", output_dir,
    ]
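    # NOTE: this assumes the `openevolve-run` CLI (installed with the
    # OpenEvolve package) is on PATH and accepts the flags above.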

    print(f"\n{'='*60}")
    print(f"TRIAL {trial_num + 1}: Running OpenEvolve with seed {seed}")
    print('='*60)

    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.returncode != 0:
        # Surface failures instead of silently parsing an empty log
        print(f"openevolve-run exited with code {result.returncode}:\n{result.stderr}", file=sys.stderr)

    # Clean up the temp config
    if seed is not None and os.path.exists(temp_config):
        os.remove(temp_config)
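
    # NOTE: the parsing below is heuristic. It assumes OpenEvolve writes a
    # "*.log" file under <output_dir>/logs and reports scores in the form
    # "combined_score=<float>"; adjust the patterns if the log format differs.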
    # Parse results from the run logs
    solution_found_at = None
    best_score = 0.0

    log_dir = Path(output_dir) / "logs"
    if log_dir.exists():
        log_files = sorted(log_dir.glob("*.log"))
        if log_files:
            with open(log_files[0], "r") as f:
                log_content = f.read()

            # Find the best score reported anywhere in the log
            score_matches = re.findall(r'combined_score[=:]\s*([\d.]+)', log_content)
            if score_matches:
                best_score = max(float(s) for s in score_matches)

            # Look for the first 100% solution: an iteration line reporting
            # combined_score=1.0000
            new_best_matches = re.findall(r'New best solution found at iteration (\d+):', log_content)
            perfect_matches = re.findall(r'Iteration (\d+):.*?combined_score=1\.0000', log_content)

            if perfect_matches:
                solution_found_at = int(perfect_matches[0])
            elif best_score >= 1.0 and new_best_matches:
                # Fallback: take the last "new best" iteration when the log
                # shows a perfect score but no matching iteration line
                solution_found_at = int(new_best_matches[-1])

    return {
        "trial": trial_num,
        "seed": seed,
        "solution_found_at": solution_found_at,
        "best_score": best_score,
        "max_iterations": max_iterations,
    }


def run_trials(num_trials: int = 3, max_iterations: int = 100, base_seed: int = 100):
    """Run multiple trials and collect statistics."""
    results = []
    solutions_found = []
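
    # Give each trial a distinct seed; with base_seed=100 the trials use
    # seeds 100, 211, 322, ...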
    for trial in range(num_trials):
        seed = base_seed + trial * 111
        result = run_trial(trial, max_iterations, seed)
        results.append(result)

        if result["solution_found_at"] is not None:
            solutions_found.append(result["solution_found_at"])
            print(f"Trial {trial + 1}: SUCCESS at iteration {result['solution_found_at']}")
        else:
            print(f"Trial {trial + 1}: FAILED (best score: {result['best_score']:.2%})")

    # Calculate statistics
    success_rate = len(solutions_found) / num_trials
    avg_iterations = sum(solutions_found) / len(solutions_found) if solutions_found else None
    min_iterations = min(solutions_found) if solutions_found else None
    max_iterations_found = max(solutions_found) if solutions_found else None

    print(f"\n{'='*60}")
    print("OPENEVOLVE TRIAL RESULTS")
    print('='*60)
    print(f"Trials: {num_trials}")
    print(f"Max iterations per trial: {max_iterations}")
    print(f"Success rate: {success_rate:.0%} ({len(solutions_found)}/{num_trials})")
    if solutions_found:
        print(f"Avg iterations to solution: {avg_iterations:.1f}")
        print(f"Min iterations: {min_iterations}")
        print(f"Max iterations: {max_iterations_found}")
    print('='*60)

    # Save a machine-readable summary
    summary = {
        "config": {
            "num_trials": num_trials,
            "max_iterations": max_iterations,
        },
        "summary": {
            "success_rate": success_rate,
            "avg_iterations_to_solution": avg_iterations,
            "min_iterations": min_iterations,
            "max_iterations": max_iterations_found,
            "solutions_found": len(solutions_found),
        },
        "trials": results,
    }

    with open("openevolve_trials_results.json", "w") as f:
        json.dump(summary, f, indent=2)
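    # Illustrative shape of the saved file (values here are hypothetical,
    # not real results):
    # {
    #   "config": {"num_trials": 3, "max_iterations": 100},
    #   "summary": {"success_rate": 0.67, "avg_iterations_to_solution": 42.5, ...},
    #   "trials": [{"trial": 0, "seed": 100, ...}, ...]
    # }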

    print("\nResults saved to: openevolve_trials_results.json")

    # Clean up trial output directories
    for trial in range(num_trials):
        output_dir = f"openevolve_output_trial_{trial}"
        if os.path.exists(output_dir):
            shutil.rmtree(output_dir)

    return summary


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--trials", type=int, default=3, help="Number of trials")
    parser.add_argument("--iterations", type=int, default=100, help="Max iterations per trial")
    parser.add_argument("--seed", type=int, default=100, help="Base random seed")
    args = parser.parse_args()

    run_trials(num_trials=args.trials, max_iterations=args.iterations, base_seed=args.seed)
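
    # Example invocation (the script filename "run_openevolve_trials.py" is
    # an assumed name):
    #   python run_openevolve_trials.py --trials 5 --iterations 150 --seed 42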