skill_benchmark

SkillBenchmarkRunner — orchestrate the 4-condition × N-seed × M-task PinchBench sweep that measures whether skills + DSPy/GEPA optimization improve agent performance.

Plan 2B implementation. See: docs/superpowers/specs/2026-04-08-skills-benchmark-evaluation-design.md
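A minimal end-to-end sketch (assuming the module is importable as openjarvis.evals.skill_benchmark, matching the source path shown below; the override values are illustrative):

from pathlib import Path

from openjarvis.evals.skill_benchmark import SkillBenchmarkConfig, SkillBenchmarkRunner

config = SkillBenchmarkConfig(
    seeds=[42, 43, 44],
    max_samples=20,  # cap tasks per seed for a cheaper trial sweep
    output_dir=Path("docs/superpowers/results/"),
)
runner = SkillBenchmarkRunner(config)
comparison = runner.run_all_conditions()  # all 4 conditions x all seeds
report_path = runner.write_report(comparison)
print(f"Report written to {report_path}")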

Classes

SkillBenchmarkConfig dataclass

SkillBenchmarkConfig(
    benchmark: str = 'pinchbench',
    model: str = 'qwen3.5:9b',
    engine: str = 'ollama',
    agent: str = 'native_react',
    tools: List[str] = (lambda: ['calculator', 'think', 'shell_exec', 'web_search', 'file_read', 'file_write'])(),
    seeds: List[int] = (lambda: [42, 43, 44])(),
    max_samples: Optional[int] = None,
    output_dir: Path = (lambda: Path('docs/superpowers/results/'))(),
    skills_dir: Path = (lambda: expanduser())(),
    overlay_dir_dspy: Path = (lambda: expanduser())(),
    overlay_dir_gepa: Path = (lambda: expanduser())(),
)

Configuration for a single SkillBenchmarkRunner sweep.
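The (lambda: ...)() defaults in the signature appear to be dataclass default factories rendered inline: mutable defaults such as tools and seeds are rebuilt per instance rather than shared. A small sketch (the override values are illustrative):

config = SkillBenchmarkConfig(model="qwen3.5:9b", seeds=[101, 102], max_samples=5)
assert config.tools is not SkillBenchmarkConfig().tools  # default_factory yields a fresh list per instance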

ConditionResult dataclass

ConditionResult(condition: str, seeds: List[int], per_seed_pass_rate: Dict[int, float], mean_pass_rate: float, stddev_pass_rate: float, per_task_results: Dict[str, List[bool]], skill_invocation_counts: Dict[str, int], total_tokens: int, total_runtime_seconds: float)

Aggregated result for a single condition across all seeds.
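per_task_results maps each task id to one boolean per seed, so per-task pass fractions fall out directly. A reading sketch (result is a ConditionResult returned by run_condition):

for task_id, passes in sorted(result.per_task_results.items()):
    print(f"{task_id}: {sum(passes)}/{len(passes)} seeds passed")
print(f"{result.condition}: {result.mean_pass_rate:.3f} ± {result.stddev_pass_rate:.3f} pass rate")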

ConditionComparison dataclass

ConditionComparison(config: SkillBenchmarkConfig, started_at: str, ended_at: str, results: Dict[str, ConditionResult], deltas: Dict[str, float])

Top-level result of a SkillBenchmarkRunner sweep.
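ConditionComparison nests dataclasses, so dataclasses.asdict plus a str fallback for Path fields is enough to persist the raw numbers next to the markdown report. A sketch (comparison is the return value of run_all_conditions; the JSON filename is illustrative):

import dataclasses
import json
from pathlib import Path

payload = dataclasses.asdict(comparison)  # recurses into the nested dataclasses
Path("comparison.json").write_text(
    json.dumps(payload, indent=2, default=str),  # default=str handles Path values
    encoding="utf-8",
)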

SkillBenchmarkRunner

SkillBenchmarkRunner(config: SkillBenchmarkConfig)

Orchestrates the 4-condition × N-seed × M-task PinchBench sweep.

Each condition is a different SystemBuilder configuration:

- no_skills: cfg.skills.enabled = False
- skills_on: enabled, overlay_dir = empty (no overlays load)
- skills_optimized_dspy: enabled, overlay_dir = config.overlay_dir_dspy
- skills_optimized_gepa: enabled, overlay_dir = config.overlay_dir_gepa

Per-seed runs share the same backend instantiation but pass a fresh seed to the EvalRunner.

Source code in src/openjarvis/evals/skill_benchmark.py
def __init__(self, config: SkillBenchmarkConfig) -> None:
    self._config = config
    # An "empty" overlay dir for the skills_on condition.  We point at
    # a known-empty subdirectory under the output dir so SkillManager
    # finds zero overlays even if the user happens to have populated
    # the default ~/.openjarvis/learning/skills/ tree.
    self._empty_overlay_dir = (
        Path(self._config.output_dir).expanduser() / "_skills_on_empty_overlays"
    )
    self._empty_overlay_dir.mkdir(parents=True, exist_ok=True)
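mkdir(parents=True, exist_ok=True) never empties an existing directory, so anything that lands in _skills_on_empty_overlays between sweeps would quietly turn skills_on into an overlay-bearing condition. A defensive pre-flight check (a sketch outside the class; config is the runner's SkillBenchmarkConfig):

from pathlib import Path

empty_dir = Path(config.output_dir).expanduser() / "_skills_on_empty_overlays"
leftovers = list(empty_dir.iterdir()) if empty_dir.exists() else []
if leftovers:
    raise RuntimeError(f"skills_on overlay dir is not empty: {leftovers}")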
Functions

run_condition

run_condition(condition: str) -> ConditionResult

Run the benchmark for condition across all configured seeds.

Returns a ConditionResult with mean ± stddev pass rate and the per-task / per-skill aggregations.

Source code in src/openjarvis/evals/skill_benchmark.py
def run_condition(self, condition: str) -> ConditionResult:
    """Run the benchmark for *condition* across all configured seeds.

    Returns a ConditionResult with mean ± stddev pass rate and the
    per-task / per-skill aggregations.
    """
    if condition not in CONDITIONS:
        raise ValueError(
            f"Unknown condition '{condition}'.  Expected one of: "
            f"{', '.join(CONDITIONS)}"
        )

    per_seed_pass_rate: Dict[int, float] = {}
    per_task_results: Dict[str, List[bool]] = {}
    skill_counts: Dict[str, int] = {}
    total_tokens = 0
    total_runtime = 0.0

    for seed in self._config.seeds:
        seed_data = self._run_single_seed(condition, seed)
        per_seed_pass_rate[seed] = float(seed_data["pass_rate"])

        for task_id, ok in seed_data["per_task"].items():
            per_task_results.setdefault(task_id, []).append(bool(ok))

        for skill_name, count in seed_data["skill_invocations"].items():
            skill_counts[skill_name] = skill_counts.get(skill_name, 0) + int(count)

        total_tokens += int(seed_data["total_tokens"])
        total_runtime += float(seed_data["total_runtime_seconds"])

    rates = list(per_seed_pass_rate.values())
    mean_rate = statistics.fmean(rates) if rates else 0.0
    stddev_rate = statistics.stdev(rates) if len(rates) > 1 else 0.0

    return ConditionResult(
        condition=condition,
        seeds=list(self._config.seeds),
        per_seed_pass_rate=per_seed_pass_rate,
        mean_pass_rate=mean_rate,
        stddev_pass_rate=stddev_rate,
        per_task_results=per_task_results,
        skill_invocation_counts=skill_counts,
        total_tokens=total_tokens,
        total_runtime_seconds=total_runtime,
    )
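run_condition can also be driven standalone, e.g. to re-run a single arm after a fix. A sketch (condition names come from the module-level CONDITIONS referenced in the source; max_samples here is illustrative):

runner = SkillBenchmarkRunner(SkillBenchmarkConfig(max_samples=10))
result = runner.run_condition("skills_on")
print(
    f"skills_on: {result.mean_pass_rate:.3f} ± {result.stddev_pass_rate:.3f} "
    f"({result.total_tokens} tokens, {result.total_runtime_seconds:.1f}s)"
)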
run_all_conditions

run_all_conditions() -> ConditionComparison

Run all 4 conditions × all seeds.

Returns a ConditionComparison with per-condition results and the computed deltas (skills_on - no_skills, etc.).

Source code in src/openjarvis/evals/skill_benchmark.py
def run_all_conditions(self) -> ConditionComparison:
    """Run all 4 conditions × all seeds.

    Returns a ConditionComparison with per-condition results and the
    computed deltas (skills_on - no_skills, etc.).
    """
    started_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    results: Dict[str, ConditionResult] = {}
    for condition in CONDITIONS:
        LOGGER.info("Running condition: %s", condition)
        results[condition] = self.run_condition(condition)

    ended_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # Compute the headline deltas
    deltas: Dict[str, float] = {}
    if "no_skills" in results and "skills_on" in results:
        deltas["skills_on - no_skills"] = (
            results["skills_on"].mean_pass_rate
            - results["no_skills"].mean_pass_rate
        )
    if "skills_on" in results and "skills_optimized_dspy" in results:
        deltas["skills_optimized_dspy - skills_on"] = (
            results["skills_optimized_dspy"].mean_pass_rate
            - results["skills_on"].mean_pass_rate
        )
    if "skills_on" in results and "skills_optimized_gepa" in results:
        deltas["skills_optimized_gepa - skills_on"] = (
            results["skills_optimized_gepa"].mean_pass_rate
            - results["skills_on"].mean_pass_rate
        )

    return ConditionComparison(
        config=self._config,
        started_at=started_at,
        ended_at=ended_at,
        results=results,
        deltas=deltas,
    )
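Delta keys follow the "b - a" naming built above, so a positive value means condition b passed more tasks on average. A sketch flagging the headline comparison (the regression check is illustrative):

comparison = runner.run_all_conditions()
headline = comparison.deltas.get("skills_on - no_skills")
if headline is not None and headline < 0.0:
    print(f"skills reduced the mean pass rate by {-headline:.3f}")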
write_report

write_report(comparison: ConditionComparison) -> Path

Write a markdown report to output_dir.

Filename: pinchbench-skills-eval-{YYYY-MM-DD}.md

Source code in src/openjarvis/evals/skill_benchmark.py
def write_report(self, comparison: ConditionComparison) -> Path:
    """Write a markdown report to output_dir.

    Filename: pinchbench-skills-eval-{YYYY-MM-DD}.md
    """
    date_str = datetime.now(timezone.utc).strftime("%Y-%m-%d")
    out_dir = Path(self._config.output_dir).expanduser()
    out_dir.mkdir(parents=True, exist_ok=True)
    path = out_dir / f"pinchbench-skills-eval-{date_str}.md"

    lines: List[str] = []
    lines.append(f"# PinchBench Skills Evaluation — {date_str}")
    lines.append("")
    lines.append(f"**Started:** {comparison.started_at}")
    lines.append(f"**Ended:** {comparison.ended_at}")
    lines.append(f"**Model:** {comparison.config.model}")
    lines.append(f"**Engine:** {comparison.config.engine}")
    lines.append(f"**Agent:** {comparison.config.agent}")
    lines.append(f"**Seeds:** {', '.join(str(s) for s in comparison.config.seeds)}")
    max_samples = comparison.config.max_samples
    lines.append(
        f"**Max samples:** {max_samples if max_samples is not None else 'full'}"
    )
    lines.append("")

    # Summary table
    lines.append("## Summary")
    lines.append("")
    lines.append(
        "| Condition | Mean pass rate | Stddev | Total tokens | Runtime (s) |"
    )
    lines.append("|---|---|---|---|---|")
    for condition in CONDITIONS:
        r = comparison.results.get(condition)
        if r is None:
            continue
        lines.append(
            f"| {condition} | {r.mean_pass_rate:.3f} | "
            f"{r.stddev_pass_rate:.3f} | {r.total_tokens} | "
            f"{r.total_runtime_seconds:.1f} |"
        )
    lines.append("")

    # Deltas
    if comparison.deltas:
        lines.append("## Deltas")
        lines.append("")
        for name, value in comparison.deltas.items():
            sign = "+" if value >= 0 else ""
            lines.append(f"- **{name}**: {sign}{value:.3f}")
        lines.append("")

    # Per-task breakdown
    lines.append("## Per-task results")
    lines.append("")
    all_tasks: set[str] = set()
    for r in comparison.results.values():
        all_tasks.update(r.per_task_results.keys())
    if all_tasks:
        header = "| Task | " + " | ".join(CONDITIONS) + " |"
        sep = "|---|" + "|".join(["---"] * len(CONDITIONS)) + "|"
        lines.append(header)
        lines.append(sep)
        for task_id in sorted(all_tasks):
            row = [task_id]
            for condition in CONDITIONS:
                r = comparison.results.get(condition)
                passes = (
                    r.per_task_results.get(task_id, []) if r is not None else []
                )
                if not passes:
                    row.append("—")
                else:
                    n_pass = sum(1 for v in passes if v)
                    row.append(f"{n_pass}/{len(passes)}")
            lines.append("| " + " | ".join(row) + " |")
    lines.append("")

    # Per-skill invocation counts
    lines.append("## Per-skill invocation counts")
    lines.append("")
    all_skills: set[str] = set()
    for r in comparison.results.values():
        all_skills.update(r.skill_invocation_counts.keys())
    if all_skills:
        header = "| Skill | " + " | ".join(CONDITIONS) + " |"
        sep = "|---|" + "|".join(["---"] * len(CONDITIONS)) + "|"
        lines.append(header)
        lines.append(sep)
        for skill_name in sorted(all_skills):
            row = [skill_name]
            for condition in CONDITIONS:
                r = comparison.results.get(condition)
                count = (
                    r.skill_invocation_counts.get(skill_name, 0)
                    if r is not None
                    else 0
                )
                row.append(str(count))
            lines.append("| " + " | ".join(row) + " |")
    else:
        lines.append("(no skill invocations recorded)")
    lines.append("")

    path.write_text("\n".join(lines), encoding="utf-8")
    return path
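A usage sketch; note the filename is date-stamped, so two sweeps written on the same UTC day overwrite the same report (comparison is a ConditionComparison from run_all_conditions):

report_path = runner.write_report(comparison)
print(f"wrote {report_path}")
print(report_path.read_text(encoding="utf-8").splitlines()[0])  # the report's H1 title line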