Update README.md

#18
by eustlb (HF Staff) - opened
Files changed (1)
  1. README.md +4 -4
README.md CHANGED
@@ -333,11 +333,11 @@ print("\n\n")
 
 ### Transformers 🤗
 
-Voxtral is supported in Transformers natively!
+Starting with `transformers >= 4.54.0`, you can run Voxtral natively!
 
-Install Transformers from source:
+Install Transformers:
 ```bash
-pip install git+https://github.com/huggingface/transformers
+pip install -U transformers
 ```
 
 Make sure to have `mistral-common >= 1.8.1` installed with audio dependencies:
@@ -603,7 +603,7 @@ repo_id = "mistralai/Voxtral-Small-24B-2507"
 processor = AutoProcessor.from_pretrained(repo_id)
 model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
 
-inputs = processor.apply_transcrition_request(language="en", audio="https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama.mp3", model_id=repo_id)
+inputs = processor.apply_transcription_request(language="en", audio="https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama.mp3", model_id=repo_id)
 inputs = inputs.to(device, dtype=torch.bfloat16)
 
 outputs = model.generate(**inputs, max_new_tokens=500)
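
Since the updated instructions rely on minimum released versions rather than a source install, a quick environment check can confirm the setup before running the snippets. This is a minimal sketch, assuming the version floors stated in the README (`transformers >= 4.54.0`, `mistral-common >= 1.8.1`) and that the `packaging` helper is available:

```python
# Sketch: verify the installed versions meet the README's stated minimums.
# Assumes `packaging` is available (it ships as a dependency of most Python setups).
from importlib.metadata import version
from packaging.version import Version

assert Version(version("transformers")) >= Version("4.54.0"), \
    "Voxtral needs transformers >= 4.54.0 (pip install -U transformers)"
assert Version(version("mistral-common")) >= Version("1.8.1"), \
    "install mistral-common >= 1.8.1 with audio dependencies"
```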
 
333
 
334
  ### Transformers 🤗
335
 
336
+ Starting with `transformers >= 4.54.0` and above, you can run Voxtral natively!
337
 
338
+ Install Transformers:
339
  ```bash
340
+ pip install -U transformers
341
  ```
342
 
343
  Make sure to have `mistral-common >= 1.8.1` installed with audio dependencies:
 
603
  processor = AutoProcessor.from_pretrained(repo_id)
604
  model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
605
 
606
+ inputs = processor.apply_transcription_request(language="en", audio="https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama.mp3", model_id=repo_id)
607
  inputs = inputs.to(device, dtype=torch.bfloat16)
608
 
609
  outputs = model.generate(**inputs, max_new_tokens=500)
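
For context, the corrected method name slots into the transcription example as follows. This is a sketch assembled around the lines shown in the diff; the device setup and the final decoding step are assumptions based on the surrounding README code rather than part of this change:

```python
import torch
from transformers import AutoProcessor, VoxtralForConditionalGeneration

# Assumed device setup; the diff only shows the processor/model/generate lines.
device = "cuda" if torch.cuda.is_available() else "cpu"
repo_id = "mistralai/Voxtral-Small-24B-2507"

processor = AutoProcessor.from_pretrained(repo_id)
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)

# Corrected name from this PR: apply_transcription_request (was apply_transcrition_request).
inputs = processor.apply_transcription_request(
    language="en",
    audio="https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama.mp3",
    model_id=repo_id,
)
inputs = inputs.to(device, dtype=torch.bfloat16)

outputs = model.generate(**inputs, max_new_tokens=500)

# Decoding is not shown in this diff; stripping the prompt tokens and calling
# batch_decode is an assumption about how to recover the transcription text.
decoded = processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)
print(decoded[0])
```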