
Commit 0b6229d

fix(voxtral): correct typo in apply_transcription_request
1 parent fbeaf96 commit 0b6229d

File tree

2 files changed: +4, -4 lines

src/transformers/models/voxtral/processing_voxtral.py

Lines changed: 3 additions & 3 deletions
@@ -242,7 +242,7 @@ def __call__(
     the text. Please refer to the docstring of the above methods for more information.
     This methods does not support audio. To prepare the audio, please use:
     1. `apply_chat_template` [`~VoxtralProcessor.apply_chat_template`] method.
-    2. `apply_transcrition_request` [`~VoxtralProcessor.apply_transcrition_request`] method.
+    2. `apply_transcription_request` [`~VoxtralProcessor.apply_transcription_request`] method.

     Args:
         text (`str`, `list[str]`, `list[list[str]]`):
@@ -284,7 +284,7 @@ def __call__(
     return BatchFeature(data=out, tensor_type=common_kwargs.pop("return_tensors", None))

     # TODO: @eustlb, this should be moved to mistral_common + testing
-    def apply_transcrition_request(
+    def apply_transcription_request(
         self,
         language: Union[str, list[str]],
         audio: Union[str, list[str], AudioInput],
@@ -306,7 +306,7 @@ def apply_transcrition_request(
     language = "en"
     audio = "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama.mp3"

-    inputs = processor.apply_transcrition_request(language=language, audio=audio, model_id=model_id)
+    inputs = processor.apply_transcription_request(language=language, audio=audio, model_id=model_id)
     ```

     Args:
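For reference, here is a minimal end-to-end sketch of the corrected method, assembled from the docstring example in the hunk above. The checkpoint id below is an assumption for illustration (this commit does not name one); any Voxtral checkpoint with a matching processor should work the same way:

    # Hypothetical usage of the renamed method; the checkpoint id is assumed, not taken from this diff.
    from transformers import AutoProcessor

    model_id = "mistralai/Voxtral-Mini-3B-2507"  # assumed checkpoint id
    processor = AutoProcessor.from_pretrained(model_id)

    # Build transcription inputs with the corrected spelling of the method name.
    inputs = processor.apply_transcription_request(
        language="en",
        audio="https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama.mp3",
        model_id=model_id,
    )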

tests/models/voxtral/test_modeling_voxtral.py

Lines changed: 1 addition & 1 deletion
@@ -456,7 +456,7 @@ def test_transcribe_mode_audio_input(self):
     model = VoxtralForConditionalGeneration.from_pretrained(
         self.checkpoint_name, torch_dtype=self.dtype, device_map=torch_device
     )
-    inputs = self.processor.apply_transcrition_request(
+    inputs = self.processor.apply_transcription_request(
         language="en",
         audio="https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama.mp3",
         model_id=self.checkpoint_name,
