-
Notifications
You must be signed in to change notification settings - Fork 973
Expand file tree
/
Copy pathconfig.py
More file actions
563 lines (451 loc) · 19.6 KB
/
config.py
File metadata and controls
563 lines (451 loc) · 19.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
"""
Configuration handling for OpenEvolve
"""
import os
import re
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
import dacite
import yaml
if TYPE_CHECKING:
from openevolve.llm.base import LLMInterface
_ENV_VAR_PATTERN = re.compile(r"^\$\{([^}]+)\}$") # ${VAR}
def _resolve_env_var(value: Optional[str]) -> Optional[str]:
"""
Resolve ${VAR} environment variable reference in a string value.
In current implementation pattern must match the entire string (e.g., "${OPENAI_API_KEY}"),
not embedded within other text.
Args:
value: The string value that may contain ${VAR} syntax
Returns:
The resolved value with environment variable expanded, or original value if no match
Raises:
ValueError: If the environment variable is referenced but not set
"""
if value is None:
return None
match = _ENV_VAR_PATTERN.match(value)
if not match:
return value
var_name = match.group(1)
env_value = os.environ.get(var_name)
if env_value is None:
raise ValueError(f"Environment variable {var_name} is not set")
return env_value
@dataclass
class LLMModelConfig:
    """Configuration for a single LLM model.

    Fields left as None are filled in later from shared values on the parent
    ``LLMConfig`` (see ``LLMConfig.update_model_params``), so None here means
    "inherit", not "disabled".
    """

    # API configuration.
    # NOTE: previously annotated `str = None` / `int = None`; Optional[...] is
    # the correct type and lets dacite accept None values from YAML.
    api_base: Optional[str] = None
    api_key: Optional[str] = None
    name: Optional[str] = None

    # Custom LLM client factory (callable), bypassing the default client when set
    init_client: Optional[Callable] = None

    # Relative weight of this model when sampling from an ensemble
    weight: float = 1.0

    # Generation parameters
    system_message: Optional[str] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_tokens: Optional[int] = None

    # Request parameters (seconds for timeout/retry_delay -- presumably; confirm against client)
    timeout: Optional[int] = None
    retries: Optional[int] = None
    retry_delay: Optional[int] = None

    # Reproducibility
    random_seed: Optional[int] = None

    # Reasoning parameters (passed through to the provider)
    reasoning_effort: Optional[str] = None

    # Manual mode (human-in-the-loop)
    manual_mode: Optional[bool] = None
    _manual_queue_dir: Optional[str] = None

    def __post_init__(self):
        """Resolve a ${VAR} environment-variable reference in api_key."""
        self.api_key = _resolve_env_var(self.api_key)
@dataclass
class LLMConfig(LLMModelConfig):
    """Configuration for LLM models.

    Extends ``LLMModelConfig`` with ensemble lists (``models`` /
    ``evaluator_models``) and deprecated single-model fields. Values set here
    act as shared defaults pushed onto every model that has not set its own.
    """

    # API configuration
    api_base: str = "https://api.openai.com/v1"

    # Generation parameters
    system_message: Optional[str] = "system_message"
    temperature: float | None = 0.7
    top_p: float | None = None
    max_tokens: int = 4096

    # Request parameters
    timeout: int = 60
    retries: int = 3
    retry_delay: int = 5

    # n-model configuration for evolution LLM ensemble
    models: List[LLMModelConfig] = field(default_factory=list)
    # n-model configuration for evaluator LLM ensemble
    evaluator_models: List[LLMModelConfig] = field(default_factory=list)

    # Backward compatibility with primary_model(_weight) options (deprecated)
    primary_model: Optional[str] = None
    primary_model_weight: Optional[float] = None
    secondary_model: Optional[str] = None
    secondary_model_weight: Optional[float] = None

    # Reasoning parameters (inherited from LLMModelConfig but can be overridden)
    reasoning_effort: Optional[str] = None

    # Manual mode switch
    manual_mode: bool = False

    def __post_init__(self):
        """Build model lists and propagate shared values onto each model."""
        super().__post_init__()  # Resolve ${VAR} in api_key at LLMConfig level

        # Handle backward compatibility for primary/secondary model fields.
        self._append_legacy_models()

        # Only validate if this looks like a user config (has some model info);
        # don't validate during internal/default initialization.
        if (
            self.primary_model
            or self.secondary_model
            or self.primary_model_weight
            or self.secondary_model_weight
        ) and not self.models:
            raise ValueError(
                "No LLM models configured. Please specify 'models' array or "
                "'primary_model' in your configuration."
            )

        self._finalize_models()

    def _append_legacy_models(self) -> None:
        """Append models derived from the deprecated primary/secondary fields."""
        if self.primary_model:
            self.models.append(
                LLMModelConfig(
                    name=self.primary_model,
                    weight=self.primary_model_weight or 1.0,
                )
            )
        if self.secondary_model:
            # Only add the secondary model when its weight is positive;
            # an explicit weight of 0 disables it.
            if self.secondary_model_weight is None or self.secondary_model_weight > 0:
                self.models.append(
                    LLMModelConfig(
                        name=self.secondary_model,
                        weight=(
                            self.secondary_model_weight
                            if self.secondary_model_weight is not None
                            else 0.2
                        ),
                    )
                )

    def _shared_model_config(self) -> Dict[str, Any]:
        """Shared values pushed onto every model that has not set its own.

        Used by both __post_init__ and rebuild_models so the two code paths
        cannot drift apart. (Previously rebuild_models omitted "manual_mode",
        so rebuilt models never received the manual-mode flag.)
        """
        return {
            "api_base": self.api_base,
            "api_key": self.api_key,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "max_tokens": self.max_tokens,
            "timeout": self.timeout,
            "retries": self.retries,
            "retry_delay": self.retry_delay,
            "random_seed": self.random_seed,
            "reasoning_effort": self.reasoning_effort,
            "manual_mode": self.manual_mode,
        }

    def _finalize_models(self) -> None:
        """Default evaluator models and apply the shared configuration."""
        # If no evaluator models are defined, use the same models as for evolution
        if not self.evaluator_models:
            self.evaluator_models = self.models.copy()
        self.update_model_params(self._shared_model_config())

    def update_model_params(self, args: Dict[str, Any], overwrite: bool = False) -> None:
        """Update parameters for all evolution and evaluator models.

        Args:
            args: Mapping of attribute name to new value.
            overwrite: When False (default) only attributes currently None are
                set, so per-model settings win over shared defaults.
        """
        for model in self.models + self.evaluator_models:
            for key, value in args.items():
                if overwrite or getattr(model, key, None) is None:
                    setattr(model, key, value)

    def rebuild_models(self) -> None:
        """Rebuild the model lists after primary/secondary field changes."""
        # Clear existing model lists, then re-run the construction logic
        # shared with __post_init__.
        self.models = []
        self.evaluator_models = []
        self._append_legacy_models()
        self._finalize_models()
@dataclass
class PromptConfig:
    """Configuration for prompt generation."""

    # Directory of custom prompt templates; None presumably falls back to
    # built-in templates -- confirm against the prompt sampler
    template_dir: Optional[str] = None
    # Keys naming the system-message templates to use
    system_message: str = "system_message"
    evaluator_system_message: str = "evaluator_system_message"

    # Large-codebase mode: represent programs in prompts via compact changes descriptions
    programs_as_changes_description: bool = False
    system_message_changes_description: Optional[str] = None
    initial_changes_description: str = ""

    # Number of examples to include in the prompt
    num_top_programs: int = 3
    num_diverse_programs: int = 2

    # Template stochasticity
    use_template_stochasticity: bool = True
    # NOTE(review): looks like a map of template key -> alternative phrasings;
    # verify against the template renderer
    template_variations: Dict[str, List[str]] = field(default_factory=dict)

    # Meta-prompting
    # Note: meta-prompting features not implemented
    use_meta_prompting: bool = False
    meta_prompt_weight: float = 0.1

    # Artifact rendering
    include_artifacts: bool = True
    max_artifact_bytes: int = 20 * 1024  # 20KB in prompt
    artifact_security_filter: bool = True

    # Feature extraction and program labeling
    suggest_simplification_after_chars: Optional[int] = (
        500  # Suggest simplifying if program exceeds this many characters
    )
    include_changes_under_chars: Optional[int] = (
        100  # Include change descriptions in features if under this length
    )
    concise_implementation_max_lines: Optional[int] = (
        10  # Label as "concise" if program has this many lines or fewer
    )
    comprehensive_implementation_min_lines: Optional[int] = (
        50  # Label as "comprehensive" if program has this many lines or more
    )

    # Diff summary formatting for the "Previous Attempts" section
    diff_summary_max_line_len: int = 100  # Truncate lines longer than this
    diff_summary_max_lines: int = 30  # Max lines per SEARCH/REPLACE block

    # Backward compatibility - deprecated
    code_length_threshold: Optional[int] = (
        None  # Deprecated: use suggest_simplification_after_chars
    )
@dataclass
class DatabaseConfig:
    """Configuration for the program database."""

    # General settings
    db_path: Optional[str] = None  # Path to store database on disk
    in_memory: bool = True  # Keep the database in memory (db_path still used for persistence)

    # Prompt and response logging to programs/<id>.json
    log_prompts: bool = True

    # Evolutionary parameters
    population_size: int = 1000
    archive_size: int = 100
    num_islands: int = 5

    # Selection parameters (ratios used when sampling parents)
    elite_selection_ratio: float = 0.1
    exploration_ratio: float = 0.2
    exploitation_ratio: float = 0.7
    # Note: diversity_metric fixed to "edit_distance"
    diversity_metric: str = "edit_distance"  # Options: "edit_distance", "feature_based"

    # Feature map dimensions for MAP-Elites
    # Default to complexity and diversity for better exploration
    # CRITICAL: For custom dimensions, evaluators must return RAW VALUES, not bin indices
    # Built-in: "complexity", "diversity", "score" (always available)
    # Custom: Any metric from your evaluator (must be continuous values)
    feature_dimensions: List[str] = field(
        default_factory=lambda: ["complexity", "diversity"],
        metadata={
            "help": "List of feature dimensions for MAP-Elites grid. "
            "Built-in dimensions: 'complexity', 'diversity', 'score'. "
            "Custom dimensions: Must match metric names from evaluator. "
            "IMPORTANT: Evaluators must return raw continuous values for custom dimensions, "
            "NOT pre-computed bin indices. OpenEvolve handles all scaling and binning internally."
        },
    )
    feature_bins: Union[int, Dict[str, int]] = 10  # Can be int (all dims) or dict (per-dim)
    diversity_reference_size: int = 20  # Size of reference set for diversity calculation

    # Migration parameters for island-based evolution
    migration_interval: int = 50  # Migrate every N generations
    migration_rate: float = 0.1  # Fraction of population to migrate

    # Random seed for reproducible sampling
    random_seed: Optional[int] = 42

    # Artifact storage
    artifacts_base_path: Optional[str] = None  # Defaults to db_path/artifacts
    artifact_size_threshold: int = 32 * 1024  # 32KB threshold
    cleanup_old_artifacts: bool = True
    artifact_retention_days: int = 30
    max_snapshot_artifacts: Optional[int] = (
        100  # Max artifacts in worker snapshots (None=unlimited)
    )

    # Embedding/LLM-based novelty detection
    # NOTE(review): semantics defined by the database implementation -- confirm there
    novelty_llm: Optional["LLMInterface"] = None
    embedding_model: Optional[str] = None
    similarity_threshold: float = 0.99
@dataclass
class EvaluatorConfig:
    """Configuration for program evaluation."""

    # General settings
    timeout: int = 300  # Maximum evaluation time in seconds
    max_retries: int = 3

    # Resource limits for evaluation
    # Note: resource limits not implemented
    memory_limit_mb: Optional[int] = None
    cpu_limit: Optional[float] = None

    # Evaluation strategies
    cascade_evaluation: bool = True  # Run staged evaluation, stopping early on low scores
    cascade_thresholds: List[float] = field(default_factory=lambda: [0.5, 0.75, 0.9])

    # Parallel evaluation
    parallel_evaluations: int = 1
    # Note: distributed evaluation not implemented
    distributed: bool = False

    # LLM-based feedback
    use_llm_feedback: bool = False
    llm_feedback_weight: float = 0.1

    # Artifact handling
    enable_artifacts: bool = True
    max_artifact_storage: int = 100 * 1024 * 1024  # 100MB per program
@dataclass
class EvolutionTraceConfig:
    """Configuration for evolution trace logging."""

    enabled: bool = False  # Master switch for trace logging
    format: str = "jsonl"  # Options: "jsonl", "json", "hdf5"
    include_code: bool = False  # Include program source in each trace record
    include_prompts: bool = True  # Include prompts in each trace record
    output_path: Optional[str] = None  # Where to write traces; None presumably uses a default -- confirm
    buffer_size: int = 10  # Records buffered before a write
    compress: bool = False
@dataclass
class PromptMetaEvolutionConfig:
    """Configuration for meta-evolution of prompt templates.

    When enabled, OpenEvolve maintains an archive of prompt templates,
    tracks their success rates, and evolves them over time to improve
    mutation quality.
    """

    # Master switch
    enabled: bool = False

    # Archive settings
    archive_size: int = 20  # Max templates to keep in archive

    # Evolution triggers
    min_uses_for_evolution: int = 10  # Min uses before template can be evolved
    evolution_interval: int = 20  # Trigger evolution every N iterations

    # Sampling behavior
    exploration_rate: float = 0.2  # Probability of sampling random template
    elite_fraction: float = 0.3  # Fraction of top templates protected from pruning

    # Scoring weights (must sum to 1.0)
    # score = w_success * success_rate + w_improvement * improvement_rate + w_fitness * normalized_fitness_delta
    score_weight_success: float = 0.3  # Weight for success rate (mutations accepted)
    score_weight_improvement: float = 0.4  # Weight for improvement rate (fitness increased)
    score_weight_fitness_delta: float = 0.3  # Weight for avg fitness delta magnitude

    # Scoring parameters
    score_min_uses: int = 5  # Min uses before score is calculated (else neutral prior)
    score_neutral_prior: float = 0.5  # Score returned when uses < min_uses

    def __post_init__(self):
        """Reject weight combinations that do not sum (within 1e-6) to 1.0."""
        total = (
            self.score_weight_success
            + self.score_weight_improvement
            + self.score_weight_fitness_delta
        )
        if abs(total - 1.0) > 1e-6:
            raise ValueError(
                f"Scoring weights must sum to 1.0, got {total:.6f} "
                f"(success={self.score_weight_success}, "
                f"improvement={self.score_weight_improvement}, "
                f"fitness_delta={self.score_weight_fitness_delta})"
            )
@dataclass
class Config:
    """Master configuration for OpenEvolve."""

    # General settings
    max_iterations: int = 10000
    checkpoint_interval: int = 100
    log_level: str = "INFO"
    log_dir: Optional[str] = None
    random_seed: Optional[int] = 42
    # Previously annotated `str = None`; Optional is the correct type.
    language: Optional[str] = None
    file_suffix: str = ".py"

    # Component configurations
    llm: LLMConfig = field(default_factory=LLMConfig)
    prompt: PromptConfig = field(default_factory=PromptConfig)
    database: DatabaseConfig = field(default_factory=DatabaseConfig)
    evaluator: EvaluatorConfig = field(default_factory=EvaluatorConfig)
    evolution_trace: EvolutionTraceConfig = field(default_factory=EvolutionTraceConfig)
    prompt_meta_evolution: PromptMetaEvolutionConfig = field(
        default_factory=PromptMetaEvolutionConfig
    )

    # Evolution settings
    diff_based_evolution: bool = True
    max_code_length: int = 10000
    diff_pattern: str = r"<<<<<<< SEARCH\n(.*?)=======\n(.*?)>>>>>>> REPLACE"

    # Early stopping settings
    early_stopping_patience: Optional[int] = None
    convergence_threshold: float = 0.001
    early_stopping_metric: str = "combined_score"

    # Parallel controller settings
    max_tasks_per_child: Optional[int] = None

    @classmethod
    def from_yaml(cls, path: Union[str, Path]) -> "Config":
        """Load configuration from a YAML file.

        Relative ``prompt.template_dir`` paths are resolved against the
        config file's directory so the config works from any CWD.
        """
        config_path = Path(path).resolve()
        with open(config_path, "r", encoding="utf-8") as f:
            config_dict = yaml.safe_load(f)

        config = cls.from_dict(config_dict)

        # Resolve template_dir relative to config file location
        if config.prompt.template_dir:
            template_path = Path(config.prompt.template_dir)
            if not template_path.is_absolute():
                config.prompt.template_dir = str((config_path.parent / template_path).resolve())

        return config

    @classmethod
    def from_dict(cls, config_dict: Dict[str, Any]) -> "Config":
        """Build a Config from a plain dict (does not mutate the input).

        Raises:
            ValueError: If diff_pattern is not a valid regex, or if the
                resulting configuration is internally inconsistent.
        """
        if "diff_pattern" in config_dict:
            try:
                re.compile(config_dict["diff_pattern"])
            except re.error as e:
                # Chain the regex error so the original position/message survives.
                raise ValueError(f"Invalid regex pattern in diff_pattern: {e}") from e

        # Remove None values for temperature and top_p to avoid dacite type errors;
        # alternatively, pass check_types=False to dacite.from_dict, but that can
        # hide other issues. Work on copies so the caller's dict is untouched.
        if "llm" in config_dict:
            llm_dict = dict(config_dict["llm"])
            for key in ("temperature", "top_p"):
                if key in llm_dict and llm_dict[key] is None:
                    del llm_dict[key]
            config_dict = {**config_dict, "llm": llm_dict}

        config: Config = dacite.from_dict(
            data_class=cls,
            data=config_dict,
            config=dacite.Config(
                cast=[List, Union],
                forward_references={"LLMInterface": Any},
            ),
        )

        # Propagate the global seed to the database when it has none of its own.
        if config.database.random_seed is None and config.random_seed is not None:
            config.database.random_seed = config.random_seed

        if config.prompt.programs_as_changes_description and not config.diff_based_evolution:
            raise ValueError(
                "prompt.programs_as_changes_description=true requires diff_based_evolution=true "
                "(full rewrites cannot reliably update code and changes_description together)"
            )

        return config

    def to_dict(self) -> Dict[str, Any]:
        """Return the configuration as a nested plain dict."""
        return asdict(self)

    def to_yaml(self, path: Union[str, Path]) -> None:
        """Save configuration to a YAML file."""
        with open(path, "w", encoding="utf-8") as f:
            yaml.dump(self.to_dict(), f, default_flow_style=False)
def load_config(config_path: Optional[Union[str, Path]] = None) -> Config:
    """Load configuration from a YAML file or use defaults."""
    if config_path and os.path.exists(config_path):
        cfg = Config.from_yaml(config_path)
    else:
        cfg = Config()

    # Environment variables act as fallbacks for models that have no
    # api_key/api_base of their own.
    env_params = {
        "api_key": os.environ.get("OPENAI_API_KEY"),
        "api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
    }
    cfg.llm.update_model_params(env_params)

    # Make the system message available to the individual models, in case it
    # is not provided from the prompt sampler.
    cfg.llm.update_model_params({"system_message": cfg.prompt.system_message})

    return cfg