#!/usr/bin/env python
"""
Inference with LoRA Fine-tuned VibeVoice ASR Model

This script loads a LoRA fine-tuned model and runs inference.

Usage:
    python inference_lora.py \
        --base_model microsoft/VibeVoice-ASR \
        --lora_path ./output \
        --audio_file ./toy_dataset/0.mp3
"""

import argparse
from typing import Optional

import torch

from peft import PeftModel

from vibevoice.modular.modeling_vibevoice_asr import VibeVoiceASRForConditionalGeneration
from vibevoice.processor.vibevoice_asr_processor import VibeVoiceASRProcessor


def load_lora_model(
    base_model_path: str,
    lora_path: str,
    device: str = "cuda",
    dtype: torch.dtype = torch.bfloat16,
):
    """
    Load the base model and attach the LoRA adapter weights.

    Args:
        base_model_path: Path to the base pretrained model
        lora_path: Path to the LoRA adapter weights
        device: Device to load the model on ("cuda", "cpu", or "auto")
        dtype: Data type for the model weights

    Returns:
        Tuple of (model, processor)
    """
    print(f"Loading base model from {base_model_path}")

    # Load processor
    processor = VibeVoiceASRProcessor.from_pretrained(
        base_model_path,
        language_model_pretrained_name="Qwen/Qwen2.5-7B"
    )

    # Load base model. FlashAttention-2 kernels require a CUDA device and
    # fp16/bf16 weights, so fall back to the model's default attention
    # implementation otherwise (e.g. for CPU runs in float32).
    extra_kwargs = {}
    if device != "cpu" and dtype in (torch.float16, torch.bfloat16):
        extra_kwargs["attn_implementation"] = "flash_attention_2"
    model = VibeVoiceASRForConditionalGeneration.from_pretrained(
        base_model_path,
        dtype=dtype,
        device_map=device if device == "auto" else None,
        trust_remote_code=True,
        **extra_kwargs,
    )

    if device != "auto":
        model = model.to(device)

    # Load LoRA adapter
    print(f"Loading LoRA adapter from {lora_path}")
    model = PeftModel.from_pretrained(model, lora_path)

    # Optionally merge the LoRA weights into the base model for faster inference:
    # model = model.merge_and_unload()
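    #
    # merge_and_unload() folds each low-rank update (scaling * B @ A) into the
    # corresponding base weight matrix and returns the plain base model, which
    # removes the adapter indirection from every forward pass. A merged model
    # can no longer be trained as a PEFT adapter, so the call is left
    # commented out by default.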

    model.eval()
    print("Model loaded successfully")

    return model, processor


def transcribe(
    model,
    processor,
    audio_path: str,
    max_new_tokens: int = 4096,
    temperature: float = 0.0,
    context_info: Optional[str] = None,
):
    """
    Transcribe an audio file using the LoRA fine-tuned model.

    Args:
        model: The LoRA fine-tuned model
        processor: The processor
        audio_path: Path to the audio file
        max_new_tokens: Maximum number of tokens to generate
        temperature: Sampling temperature (0 = greedy decoding)
        context_info: Optional context info (e.g., hotwords)

    Returns:
        Dict with the raw generated text and the parsed segments
    """
    print(f"\nTranscribing: {audio_path}")

    # Process audio
    inputs = processor(
        audio=audio_path,
        sampling_rate=None,
        return_tensors="pt",
        padding=True,
        add_generation_prompt=True,
        context_info=context_info,
    )

    # Move tensors to the model's device; deriving it from the model's own
    # parameters also works when the model was loaded with device_map="auto".
    # Non-tensor entries are passed through unchanged.
    model_device = next(model.parameters()).device
    inputs = {k: v.to(model_device) if isinstance(v, torch.Tensor) else v
              for k, v in inputs.items()}

    # Generation config
    gen_config = {
        "max_new_tokens": max_new_tokens,
        "pad_token_id": processor.pad_id,
        "eos_token_id": processor.tokenizer.eos_token_id,
        "do_sample": temperature > 0,
    }
    if temperature > 0:
        gen_config["temperature"] = temperature
        gen_config["top_p"] = 0.9
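        # top_p=0.9 (nucleus sampling) keeps the smallest set of tokens whose
        # cumulative probability reaches 0.9; for ASR, greedy decoding with
        # temperature=0 is usually the more reproducible choice.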

    # Generate
    with torch.no_grad():
        output_ids = model.generate(**inputs, **gen_config)

    # Decode only the newly generated tokens: generate() returns the prompt
    # followed by the continuation, so slice off the input length first.
    input_length = inputs['input_ids'].shape[1]
    generated_ids = output_ids[0, input_length:]
    generated_text = processor.decode(generated_ids, skip_special_tokens=True)

    # Parse structured output
    try:
        segments = processor.post_process_transcription(generated_text)
    except Exception as e:
        print(f"Warning: Failed to parse structured output: {e}")
        segments = []

    return {
        "raw_text": generated_text,
        "segments": segments,
    }


def main():
    parser = argparse.ArgumentParser(description="Inference with LoRA Fine-tuned VibeVoice ASR")

    parser.add_argument(
        "--base_model",
        type=str,
        default="microsoft/VibeVoice-ASR",
        help="Path to base pretrained model"
    )
    parser.add_argument(
        "--lora_path",
        type=str,
        required=True,
        help="Path to LoRA adapter weights"
    )
    parser.add_argument(
        "--audio_file",
        type=str,
        required=True,
        help="Path to audio file to transcribe"
    )
    parser.add_argument(
        "--context_info",
        type=str,
        default=None,
        help="Optional context info (e.g., 'Hotwords: Tea Brew, Aiden Host')"
    )
    parser.add_argument(
        "--max_new_tokens",
        type=int,
        default=4096,
        help="Maximum tokens to generate"
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.0,
        help="Sampling temperature (0 = greedy)"
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cuda" if torch.cuda.is_available() else "cpu",
        help="Device to use"
    )

    args = parser.parse_args()

    # Load model
    dtype = torch.bfloat16 if args.device != "cpu" else torch.float32
    model, processor = load_lora_model(
        base_model_path=args.base_model,
        lora_path=args.lora_path,
        device=args.device,
        dtype=dtype,
    )

    # Transcribe
    result = transcribe(
        model=model,
        processor=processor,
        audio_path=args.audio_file,
        max_new_tokens=args.max_new_tokens,
        temperature=args.temperature,
        context_info=args.context_info,
    )

    # Print results
    print("\n" + "=" * 60)
    print("Transcription Result")
    print("=" * 60)

    print("\n--- Raw Output ---")
    raw_text = result['raw_text']
    print(raw_text[:2000] + "..." if len(raw_text) > 2000 else raw_text)

    if result['segments']:
        print(f"\n--- Structured Output ({len(result['segments'])} segments) ---")
        for seg in result['segments'][:20]:
            print(f"[{seg.get('start_time', 'N/A')} - {seg.get('end_time', 'N/A')}] "
                  f"Speaker {seg.get('speaker_id', 'N/A')}: {seg.get('text', '')[:80]}...")
        if len(result['segments']) > 20:
            print(f"  ... and {len(result['segments']) - 20} more segments")


if __name__ == "__main__":
    main()
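
# The sketch below shows how one might export a standalone merged checkpoint
# so that deployment does not need the peft adapter at startup. It is
# illustrative only: the "./merged_model" path is made up, and it assumes the
# processor follows the usual Hugging Face save_pretrained() convention.
#
#   model, processor = load_lora_model("microsoft/VibeVoice-ASR", "./output")
#   merged = model.merge_and_unload()
#   merged.save_pretrained("./merged_model")
#   processor.save_pretrained("./merged_model")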