
test_runner

Tests for the EvalRunner.

Classes

TestEvalRunner

Functions
test_telemetry_fields_in_jsonl
test_telemetry_fields_in_jsonl(tmp_path)

Verify telemetry fields are written to JSONL output.

Source code in src/openjarvis/evals/tests/test_runner.py
def test_telemetry_fields_in_jsonl(self, tmp_path):
    """Verify telemetry fields are written to JSONL output."""
    records = self._make_records(2)
    output_path = tmp_path / "results.jsonl"

    config = RunConfig(
        benchmark="test",
        backend="mock",
        model="m",
        max_workers=1,
        output_path=str(output_path),
    )

    dataset = MockDataset(records)
    backend = MockBackend()
    scorer = MockScorer(result=True)

    runner = EvalRunner(config, dataset, backend, scorer)
    runner.run()

    lines = output_path.read_text().strip().split("\n")
    first = json.loads(lines[0])
    assert "energy_joules" in first
    assert "power_watts" in first
    assert "gpu_utilization_pct" in first
    assert "throughput_tok_per_sec" in first
    assert "mfu_pct" in first
    assert "mbu_pct" in first
    assert "ipw" in first
    assert "ipj" in first

test_ipw_ipj_computation
test_ipw_ipj_computation(tmp_path)

IPW and IPJ should be computed for correct samples.

Source code in src/openjarvis/evals/tests/test_runner.py
def test_ipw_ipj_computation(self, tmp_path):
    """IPW and IPJ should be computed for correct samples."""
    records = self._make_records(2)
    output_path = tmp_path / "results.jsonl"

    config = RunConfig(
        benchmark="test",
        backend="mock",
        model="m",
        max_workers=1,
        output_path=str(output_path),
    )

    dataset = MockDataset(records)
    backend = MockBackend()  # returns power=250W, energy=50J
    scorer = MockScorer(result=True)

    runner = EvalRunner(config, dataset, backend, scorer)
    runner.run()

    lines = output_path.read_text().strip().split("\n")
    r = json.loads(lines[0])
    # accuracy=1.0, power=250W → IPW = 1/250 = 0.004
    assert r["ipw"] == pytest.approx(1.0 / 250.0, rel=1e-4)
    # accuracy=1.0, energy=50J → IPJ = 1/50 = 0.02
    assert r["ipj"] == pytest.approx(1.0 / 50.0, rel=1e-4)

test_ipw_ipj_zero_for_incorrect
test_ipw_ipj_zero_for_incorrect(tmp_path)

IPW and IPJ should be 0 for incorrect samples.

Source code in src/openjarvis/evals/tests/test_runner.py
def test_ipw_ipj_zero_for_incorrect(self, tmp_path):
    """IPW and IPJ should be 0 for incorrect samples."""
    records = self._make_records(1)
    output_path = tmp_path / "results.jsonl"

    config = RunConfig(
        benchmark="test",
        backend="mock",
        model="m",
        max_workers=1,
        output_path=str(output_path),
    )

    dataset = MockDataset(records)
    backend = MockBackend()
    scorer = MockScorer(result=False)

    runner = EvalRunner(config, dataset, backend, scorer)
    runner.run()

    lines = output_path.read_text().strip().split("\n")
    r = json.loads(lines[0])
    assert r["ipw"] == 0.0
    assert r["ipj"] == 0.0

test_mfu_mbu_with_metadata
test_mfu_mbu_with_metadata(tmp_path)

MFU/MBU should be computed when model metadata is provided.

Source code in src/openjarvis/evals/tests/test_runner.py
def test_mfu_mbu_with_metadata(self, tmp_path):
    """MFU/MBU should be computed when model metadata is provided."""
    records = self._make_records(1)
    output_path = tmp_path / "results.jsonl"

    config = RunConfig(
        benchmark="test",
        backend="mock",
        model="m",
        max_workers=1,
        output_path=str(output_path),
        metadata={
            "param_count_b": 7.0,
            "gpu_peak_tflops": 312.0,
            "gpu_peak_bandwidth_gb_s": 2039.0,
            "num_gpus": 1,
        },
    )

    dataset = MockDataset(records)
    backend = MockBackend()  # throughput=38 tok/s
    scorer = MockScorer(result=True)

    runner = EvalRunner(config, dataset, backend, scorer)
    runner.run()

    lines = output_path.read_text().strip().split("\n")
    r = json.loads(lines[0])
    # With compute_efficiency importable, MFU/MBU should be > 0;
    # both fall back to 0 when the helper is unavailable, so only
    # assert non-negativity here.
    assert r["mfu_pct"] >= 0
    assert r["mbu_pct"] >= 0

test_summary_metric_stats
test_summary_metric_stats(tmp_path)

Summary should include MetricStats for telemetry fields.

Source code in src/openjarvis/evals/tests/test_runner.py
def test_summary_metric_stats(self, tmp_path):
    """Summary should include MetricStats for telemetry fields."""
    records = self._make_records(5)
    output_path = tmp_path / "results.jsonl"

    config = RunConfig(
        benchmark="test",
        backend="mock",
        model="m",
        max_workers=1,
        output_path=str(output_path),
    )

    dataset = MockDataset(records)
    backend = MockBackend()
    scorer = MockScorer(result=True)

    runner = EvalRunner(config, dataset, backend, scorer)
    summary = runner.run()

    assert summary.accuracy_stats is not None
    assert summary.accuracy_stats.mean == 1.0
    assert summary.energy_stats is not None
    assert summary.energy_stats.mean == 50.0
    assert summary.power_stats is not None
    assert summary.power_stats.mean == 250.0
    assert summary.throughput_stats is not None
    assert summary.ipw_stats is not None
    assert summary.total_energy_joules == 250.0  # 5 * 50.0
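`MetricStats` itself is not documented on this page; the tests only exercise its `mean` field. A minimal sketch of the aggregation the summary might perform, with `mean` the only field the tests rely on:

from dataclasses import dataclass
from statistics import mean

@dataclass
class MetricStats:
    mean: float  # the real class likely carries more fields (min/max/stdev, ...)

def summarize(values):
    """Aggregate per-sample values into MetricStats, or None if empty."""
    return MetricStats(mean=mean(values)) if values else None

# Consistent with this test: five samples at 50 J each
assert summarize([50.0] * 5).mean == 50.0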

test_summary_json_includes_metric_stats
test_summary_json_includes_metric_stats(tmp_path)

Summary JSON file should serialize MetricStats fields.

Source code in src/openjarvis/evals/tests/test_runner.py
def test_summary_json_includes_metric_stats(self, tmp_path):
    """Summary JSON file should serialize MetricStats fields."""
    records = self._make_records(3)
    output_path = tmp_path / "results.jsonl"

    config = RunConfig(
        benchmark="test",
        backend="mock",
        model="m",
        max_workers=1,
        output_path=str(output_path),
    )

    dataset = MockDataset(records)
    backend = MockBackend()
    scorer = MockScorer(result=True)

    runner = EvalRunner(config, dataset, backend, scorer)
    runner.run()

    summary_path = output_path.with_suffix(".summary.json")
    data = json.loads(summary_path.read_text())
    assert "accuracy_stats" in data
    assert data["accuracy_stats"]["mean"] == 1.0
    assert "energy_stats" in data
    assert "power_stats" in data
    assert "mfu_stats" in data or data["mfu_stats"] is None
    assert "ipw_stats" in data
    assert "ipj_stats" in data
    assert "total_energy_joules" in data

TestRunnerTokenStats

Functions
test_summary_has_total_input_output_tokens
test_summary_has_total_input_output_tokens(tmp_path)

RunSummary should include total token counts.

Source code in src/openjarvis/evals/tests/test_runner.py
def test_summary_has_total_input_output_tokens(self, tmp_path):
    """RunSummary should include total token counts."""
    records = [
        EvalRecord(
            record_id=f"r{i}", problem=f"q{i}",
            reference="a", category="test",
        )
        for i in range(3)
    ]
    output_path = tmp_path / "results.jsonl"
    config = RunConfig(
        benchmark="test", backend="mock", model="m",
        max_workers=1, output_path=str(output_path),
    )
    dataset = MockDataset(records)
    backend = MockBackend()
    scorer = MockScorer(result=True)
    runner = EvalRunner(config, dataset, backend, scorer)
    summary = runner.run()
    # MockBackend returns prompt_tokens=100, completion_tokens=50
    assert summary.total_input_tokens == 300  # 3 * 100
    assert summary.total_output_tokens == 150  # 3 * 50

test_summary_has_avg_power
test_summary_has_avg_power(tmp_path)

RunSummary should include avg_power_watts.

Source code in src/openjarvis/evals/tests/test_runner.py
def test_summary_has_avg_power(self, tmp_path):
    """RunSummary should include avg_power_watts."""
    records = [
        EvalRecord(record_id="r1", problem="q", reference="a", category="test")
    ]
    output_path = tmp_path / "results.jsonl"
    config = RunConfig(
        benchmark="test", backend="mock", model="m",
        max_workers=1, output_path=str(output_path),
    )
    dataset = MockDataset(records)
    backend = MockBackend()  # returns power_watts=250.0
    scorer = MockScorer(result=True)
    runner = EvalRunner(config, dataset, backend, scorer)
    summary = runner.run()
    assert summary.avg_power_watts == 250.0