Skip to content

faster_whisper

faster_whisper

Faster-Whisper speech-to-text backend (local, CTranslate2-based).

Classes

FasterWhisperBackend

FasterWhisperBackend(model_size: str = 'base', device: str = 'auto', compute_type: str = 'float16')

Bases: SpeechBackend

Local speech-to-text using Faster-Whisper (CTranslate2).

Source code in src/openjarvis/speech/faster_whisper.py
def __init__(
    self,
    model_size: str = "base",
    device: str = "auto",
    compute_type: str = "float16",
) -> None:
    """Store model configuration; the model itself is loaded lazily.

    Args:
        model_size: Whisper model size name (e.g. ``"base"``).
        device: Target device; ``"auto"`` lets CTranslate2 choose.
        compute_type: CTranslate2 compute type (e.g. ``"float16"``).
    """
    # Defer the (potentially slow) model load until first use.
    self._model: Optional[WhisperModel] = None
    self._model_size = model_size
    self._device = device
    self._compute_type = compute_type
Functions
transcribe
transcribe(audio: bytes, *, format: str = 'wav', language: Optional[str] = None) -> TranscriptionResult

Transcribe audio bytes using Faster-Whisper.

Source code in src/openjarvis/speech/faster_whisper.py
def transcribe(
    self,
    audio: bytes,
    *,
    format: str = "wav",
    language: Optional[str] = None,
) -> TranscriptionResult:
    """Transcribe audio bytes using Faster-Whisper.

    Args:
        audio: Raw audio file contents.
        format: Audio container format (file extension), with or
            without a leading dot.
        language: Optional language hint; autodetected when omitted.

    Returns:
        TranscriptionResult with the full text, per-segment timings,
        and detected-language metadata reported by faster-whisper.
    """
    model = self._ensure_model()

    # faster-whisper needs a file path.  NamedTemporaryFile(delete=True)
    # cannot be reopened by name while still open on Windows, so stage
    # the bytes in a temp directory instead and let its cleanup handle
    # removal.
    suffix = format if format.startswith(".") else f".{format}"
    with tempfile.TemporaryDirectory() as tmpdir:
        audio_path = f"{tmpdir}/audio{suffix}"
        with open(audio_path, "wb") as fh:
            fh.write(audio)

        kwargs = {}
        if language:
            kwargs["language"] = language

        segments_iter, info = model.transcribe(audio_path, **kwargs)
        # Consume the lazy segment iterator before the temp dir (and
        # the audio file inside it) is deleted.
        segments_list = list(segments_iter)

    # Build result
    text = "".join(seg.text for seg in segments_list).strip()
    segments = [
        Segment(
            text=seg.text.strip(),
            start=seg.start,
            end=seg.end,
            confidence=None,  # faster-whisper exposes no per-segment confidence here
        )
        for seg in segments_list
    ]

    return TranscriptionResult(
        text=text,
        language=getattr(info, "language", None),
        confidence=getattr(info, "language_probability", None),
        duration_seconds=getattr(info, "duration", 0.0),
        segments=segments,
    )
health
health() -> bool

Check if model is loaded or loadable.

Source code in src/openjarvis/speech/faster_whisper.py
def health(self) -> bool:
    """Report whether the backend can serve requests."""
    # Healthy if a model is already loaded, or if the faster-whisper
    # dependency imported successfully (so one could be loaded).
    return self._model is not None or WhisperModel is not None
supported_formats
supported_formats() -> List[str]

Supported audio formats (same as ffmpeg/Whisper).

Source code in src/openjarvis/speech/faster_whisper.py
def supported_formats(self) -> List[str]:
    """Audio container formats this backend accepts (ffmpeg-decodable)."""
    return list(("wav", "mp3", "m4a", "ogg", "flac", "webm"))