Skip to content

llm_optimizer

llm_optimizer

Backward-compatibility shim: this module has moved to `learning.optimize`; importing it from the old location still works.

Classes

LLMOptimizer

LLMOptimizer(search_space: SearchSpace, optimizer_model: str = 'claude-sonnet-4-6', optimizer_backend: Optional[InferenceBackend] = None)

Uses a cloud LLM to propose optimal OpenJarvis configs.

Inspired by DSPy's GEPA: uses textual feedback from execution traces rather than just scalar rewards.

Source code in src/openjarvis/learning/optimize/llm_optimizer.py
def __init__(
    self,
    search_space: SearchSpace,
    optimizer_model: str = "claude-sonnet-4-6",
    optimizer_backend: Optional[InferenceBackend] = None,
) -> None:
    """Store the search space and LLM settings used for proposals.

    Args:
        search_space: Space of tunable parameters to propose configs from.
        optimizer_model: Name of the cloud LLM queried for proposals/analysis.
        optimizer_backend: Inference backend used to call the LLM. May be
            None at construction; the propose/analyze methods raise
            ValueError if it is still None when they are called.
    """
    self.search_space = search_space
    self.optimizer_model = optimizer_model
    self.optimizer_backend = optimizer_backend
Functions
propose_initial
propose_initial() -> TrialConfig

Propose a reasonable starting config from the search space.

Source code in src/openjarvis/learning/optimize/llm_optimizer.py
def propose_initial(self) -> TrialConfig:
    """Ask the optimizer LLM for a sensible starting configuration.

    Returns:
        The parsed TrialConfig proposed by the LLM.

    Raises:
        ValueError: If no optimizer backend was configured.
    """
    backend = self.optimizer_backend
    if backend is None:
        raise ValueError(
            "optimizer_backend is required to propose configurations"
        )

    # Build the prompt first, then hand it to the LLM with the standard
    # proposal settings (moderate temperature for some exploration).
    raw = backend.generate(
        self._build_initial_prompt(),
        model=self.optimizer_model,
        system="You are an expert AI systems optimizer.",
        temperature=0.7,
        max_tokens=2048,
    )
    return self._parse_config_response(raw)
propose_next
propose_next(history: List[TrialResult], traces: Optional[List[Trace]] = None, frontier_ids: Optional[set] = None) -> TrialConfig

Ask the LLM to propose the next config to evaluate.

Source code in src/openjarvis/learning/optimize/llm_optimizer.py
def propose_next(
    self,
    history: List[TrialResult],
    traces: Optional[List[Trace]] = None,
    frontier_ids: Optional[set] = None,
) -> TrialConfig:
    """Ask the optimizer LLM for the next configuration to evaluate.

    Args:
        history: Results of all trials run so far.
        traces: Optional execution traces providing textual feedback.
        frontier_ids: Optional ids of trials on the current Pareto frontier.

    Returns:
        The parsed TrialConfig proposed by the LLM.

    Raises:
        ValueError: If no optimizer backend was configured.
    """
    backend = self.optimizer_backend
    if backend is None:
        raise ValueError(
            "optimizer_backend is required to propose configurations"
        )

    raw = backend.generate(
        self._build_propose_prompt(history, traces, frontier_ids=frontier_ids),
        model=self.optimizer_model,
        system="You are an expert AI systems optimizer.",
        temperature=0.7,
        max_tokens=2048,
    )
    return self._parse_config_response(raw)
analyze_trial
analyze_trial(trial: TrialConfig, summary: RunSummary, traces: Optional[List[Trace]] = None, sample_scores: Optional[List[SampleScore]] = None, per_benchmark: Optional[List[BenchmarkScore]] = None) -> TrialFeedback

Ask the LLM to analyze a completed trial. Returns structured feedback.

Source code in src/openjarvis/learning/optimize/llm_optimizer.py
def analyze_trial(
    self,
    trial: TrialConfig,
    summary: RunSummary,
    traces: Optional[List[Trace]] = None,
    sample_scores: Optional[List[SampleScore]] = None,
    per_benchmark: Optional[List[BenchmarkScore]] = None,
) -> TrialFeedback:
    """Have the LLM analyze a completed trial and return structured feedback.

    Args:
        trial: The configuration that was evaluated.
        summary: Aggregate results of the run.
        traces: Optional execution traces for textual feedback.
        sample_scores: Optional per-sample score breakdown.
        per_benchmark: Optional per-benchmark score breakdown.

    Raises:
        ValueError: If no optimizer backend was configured.
    """
    backend = self.optimizer_backend
    if backend is None:
        raise ValueError(
            "optimizer_backend is required to analyze trials"
        )

    # Analysis uses a lower temperature than proposals: we want a
    # consistent, grounded reading of the results, not exploration.
    raw = backend.generate(
        self._build_analyze_prompt(
            trial, summary, traces, sample_scores, per_benchmark,
        ),
        model=self.optimizer_model,
        system="You are an expert AI systems analyst.",
        temperature=0.3,
        max_tokens=2048,
    )
    return self._parse_feedback_response(raw)
propose_targeted
propose_targeted(history: List[TrialResult], base_config: TrialConfig, target_primitive: str, frontier_ids: Optional[set] = None) -> TrialConfig

Propose a config that only changes one primitive.

Source code in src/openjarvis/learning/optimize/llm_optimizer.py
def propose_targeted(
    self,
    history: List[TrialResult],
    base_config: TrialConfig,
    target_primitive: str,
    frontier_ids: Optional[set] = None,
) -> TrialConfig:
    """Propose a config that changes only the target primitive's params.

    The LLM is asked for a full config, but only keys belonging to
    ``target_primitive`` are taken from its proposal; every other key
    keeps the value from ``base_config``.

    Args:
        history: Results of all trials run so far.
        base_config: Config whose non-target params must be preserved.
        target_primitive: Name of the primitive whose params may change;
            both the plural and singular (one trailing "s" removed) key
            prefixes are accepted.
        frontier_ids: Optional ids of trials on the current Pareto frontier.

    Raises:
        ValueError: If no optimizer backend was configured.
    """
    if self.optimizer_backend is None:
        raise ValueError(
            "optimizer_backend is required to propose configurations"
        )

    prompt = self._build_targeted_prompt(
        history, base_config, target_primitive, frontier_ids,
    )
    response = self.optimizer_backend.generate(
        prompt,
        model=self.optimizer_model,
        system="You are an expert AI systems optimizer.",
        temperature=0.7,
        max_tokens=2048,
    )
    proposed = self._parse_config_response(response)

    # Enforce constraint: preserve non-target params from base_config.
    # BUG FIX: the previous code used rstrip("s"), which strips ALL
    # trailing "s" characters ("access" -> "acce"); depluralize by
    # removing at most one trailing "s" instead.
    singular = (
        target_primitive[:-1]
        if target_primitive.endswith("s")
        else target_primitive
    )
    allowed_prefixes = (target_primitive + ".", singular + ".")
    merged_params = dict(base_config.params)
    for key, value in proposed.params.items():
        if key.startswith(allowed_prefixes):
            merged_params[key] = value
    proposed.params = merged_params
    return proposed
propose_merge
propose_merge(candidates: List[TrialResult], history: List[TrialResult], frontier_ids: Optional[set] = None) -> TrialConfig

Combine best aspects of frontier members into one config.

Source code in src/openjarvis/learning/optimize/llm_optimizer.py
def propose_merge(
    self,
    candidates: List[TrialResult],
    history: List[TrialResult],
    frontier_ids: Optional[set] = None,
) -> TrialConfig:
    """Combine the best aspects of frontier members into one config.

    Args:
        candidates: Frontier trials whose strengths should be merged.
        history: Results of all trials run so far, for context.
        frontier_ids: Optional ids of trials on the current Pareto frontier.

    Returns:
        The parsed TrialConfig proposed by the LLM.

    Raises:
        ValueError: If no optimizer backend was configured.
    """
    backend = self.optimizer_backend
    if backend is None:
        raise ValueError(
            "optimizer_backend is required to propose configurations"
        )

    raw = backend.generate(
        self._build_merge_prompt(candidates, history, frontier_ids),
        model=self.optimizer_model,
        system="You are an expert AI systems optimizer.",
        temperature=0.7,
        max_tokens=2048,
    )
    return self._parse_config_response(raw)