yasserrmd committed on
Commit 2870476 · verified · Parent: 5b95654

Delete inference_from_file.py

Files changed (1)
  1. inference_from_file.py +0 -336
inference_from_file.py DELETED
@@ -1,336 +0,0 @@
import argparse
import os
import re
from typing import List, Tuple, Union, Dict, Any
import time
import torch

from vibevoice.modular.modeling_vibevoice_inference import VibeVoiceForConditionalGenerationInference
from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


class VoiceMapper:
    """Maps speaker names to voice file paths"""

    def __init__(self):
        self.setup_voice_presets()

        # Normalize speaker names to match the preset wav file names
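        # e.g. a preset file named "en-Alice_woman.wav" becomes reachable under the key "Alice"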
        new_dict = {}
        for name, path in self.voice_presets.items():

            if '_' in name:
                name = name.split('_')[0]

            if '-' in name:
                name = name.split('-')[-1]

            new_dict[name] = path
        self.voice_presets.update(new_dict)
        # print(list(self.voice_presets.keys()))

    def setup_voice_presets(self):
        """Setup voice presets by scanning the voices directory."""
        voices_dir = os.path.join(os.path.dirname(__file__), "voices")

        # Check if voices directory exists
        if not os.path.exists(voices_dir):
            print(f"Warning: Voices directory not found at {voices_dir}")
            self.voice_presets = {}
            self.available_voices = {}
            return

        # Scan for all WAV files in the voices directory
        self.voice_presets = {}

        # Get all .wav files in the voices directory
        wav_files = [f for f in os.listdir(voices_dir)
                     if f.lower().endswith('.wav') and os.path.isfile(os.path.join(voices_dir, f))]

        # Create dictionary with filename (without extension) as key
        for wav_file in wav_files:
            # Remove .wav extension to get the name
            name = os.path.splitext(wav_file)[0]
            # Create full path
            full_path = os.path.join(voices_dir, wav_file)
            self.voice_presets[name] = full_path

        # Sort the voice presets alphabetically by name for better UI
        self.voice_presets = dict(sorted(self.voice_presets.items()))

        # Filter out voices that don't exist (this is now redundant but kept for safety)
        self.available_voices = {
            name: path for name, path in self.voice_presets.items()
            if os.path.exists(path)
        }

        print(f"Found {len(self.available_voices)} voice files in {voices_dir}")
        print(f"Available voices: {', '.join(self.available_voices.keys())}")

    def get_voice_path(self, speaker_name: str) -> str:
        """Get voice file path for a given speaker name"""
        # First try exact match
        if speaker_name in self.voice_presets:
            return self.voice_presets[speaker_name]

        # Try partial matching (case insensitive)
        speaker_lower = speaker_name.lower()
        for preset_name, path in self.voice_presets.items():
            if preset_name.lower() in speaker_lower or speaker_lower in preset_name.lower():
                return path

        # Default to first voice if no match found
        default_voice = list(self.voice_presets.values())[0]
        print(f"Warning: No voice preset found for '{speaker_name}', using default voice: {default_voice}")
        return default_voice


def parse_txt_script(txt_content: str) -> Tuple[List[str], List[str]]:
    """
    Parse txt script content and extract speakers and their text
    Fixed pattern: Speaker 1, Speaker 2, Speaker 3, Speaker 4
    Returns: (scripts, speaker_numbers)
    """
    lines = txt_content.strip().split('\n')
    scripts = []
    speaker_numbers = []

    # Pattern to match "Speaker X:" format where X is a number
    speaker_pattern = r'^Speaker\s+(\d+):\s*(.*)$'

    current_speaker = None
    current_text = ""

    for line in lines:
        line = line.strip()
        if not line:
            continue

        match = re.match(speaker_pattern, line, re.IGNORECASE)
        if match:
            # If we have accumulated text from previous speaker, save it
            if current_speaker and current_text:
                scripts.append(f"Speaker {current_speaker}: {current_text.strip()}")
                speaker_numbers.append(current_speaker)

            # Start new speaker
            current_speaker = match.group(1).strip()
            current_text = match.group(2).strip()
        else:
            # Continue text for current speaker
            if current_text:
                current_text += " " + line
            else:
                current_text = line

    # Don't forget the last speaker
    if current_speaker and current_text:
        scripts.append(f"Speaker {current_speaker}: {current_text.strip()}")
        speaker_numbers.append(current_speaker)

    return scripts, speaker_numbers


def parse_args():
    parser = argparse.ArgumentParser(description="VibeVoice Processor TXT Input Test")
    parser.add_argument(
        "--model_path",
        type=str,
        default="microsoft/VibeVoice-1.5b",
        help="Path to the HuggingFace model directory",
    )

    parser.add_argument(
        "--txt_path",
        type=str,
        default="demo/text_examples/1p_abs.txt",
        help="Path to the txt file containing the script",
    )
    parser.add_argument(
        "--speaker_names",
        type=str,
        nargs='+',
        default='Andrew',
        help="Speaker names in order (e.g., --speaker_names Andrew Ava 'Bill Gates')",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="./outputs",
        help="Directory to save output audio files",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cuda" if torch.cuda.is_available() else "cpu",
        help="Device for tensor tests",
    )
    parser.add_argument(
        "--cfg_scale",
        type=float,
        default=1.3,
        help="CFG (Classifier-Free Guidance) scale for generation (default: 1.3)",
    )

    return parser.parse_args()
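
# Example invocation, using the defaults defined above (paths are illustrative):
#   python inference_from_file.py --txt_path demo/text_examples/1p_abs.txt --speaker_names Andrew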

def main():
    args = parse_args()

    # Initialize voice mapper
    voice_mapper = VoiceMapper()

    # Check if txt file exists
    if not os.path.exists(args.txt_path):
        print(f"Error: txt file not found: {args.txt_path}")
        return

    # Read and parse txt file
    print(f"Reading script from: {args.txt_path}")
    with open(args.txt_path, 'r', encoding='utf-8') as f:
        txt_content = f.read()

    # Parse the txt content to get speaker numbers
    scripts, speaker_numbers = parse_txt_script(txt_content)

    if not scripts:
        print("Error: No valid speaker scripts found in the txt file")
        return

    print(f"Found {len(scripts)} speaker segments:")
    for i, (script, speaker_num) in enumerate(zip(scripts, speaker_numbers)):
        print(f"  {i+1}. Speaker {speaker_num}")
        print(f"     Text preview: {script[:100]}...")

    # Map speaker numbers to provided speaker names
    speaker_name_mapping = {}
    speaker_names_list = args.speaker_names if isinstance(args.speaker_names, list) else [args.speaker_names]
    for i, name in enumerate(speaker_names_list, 1):
        speaker_name_mapping[str(i)] = name
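    # e.g. --speaker_names Andrew Ava yields {"1": "Andrew", "2": "Ava"}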

    print(f"\nSpeaker mapping:")
    for speaker_num in set(speaker_numbers):
        mapped_name = speaker_name_mapping.get(speaker_num, f"Speaker {speaker_num}")
        print(f"  Speaker {speaker_num} -> {mapped_name}")

    # Map speakers to voice files using the provided speaker names
    voice_samples = []
    actual_speakers = []

    # Get unique speaker numbers in order of first appearance
    unique_speaker_numbers = []
    seen = set()
    for speaker_num in speaker_numbers:
        if speaker_num not in seen:
            unique_speaker_numbers.append(speaker_num)
            seen.add(speaker_num)

    for speaker_num in unique_speaker_numbers:
        speaker_name = speaker_name_mapping.get(speaker_num, f"Speaker {speaker_num}")
        voice_path = voice_mapper.get_voice_path(speaker_name)
        voice_samples.append(voice_path)
        actual_speakers.append(speaker_name)
        print(f"Speaker {speaker_num} ('{speaker_name}') -> Voice: {os.path.basename(voice_path)}")

    # Prepare data for model
    full_script = '\n'.join(scripts)

    # Load processor
    print(f"Loading processor & model from {args.model_path}")
    processor = VibeVoiceProcessor.from_pretrained(args.model_path)

    # Load model
    model = VibeVoiceForConditionalGenerationInference.from_pretrained(
        args.model_path,
        torch_dtype=torch.bfloat16,
        device_map='cuda',
        attn_implementation="flash_attention_2"  # we only test flash_attention_2
    )

    model.eval()
    model.set_ddpm_inference_steps(num_steps=10)

    if hasattr(model.model, 'language_model'):
        print(f"Language model attention: {model.model.language_model.config._attn_implementation}")

    # Prepare inputs for the model
    inputs = processor(
        text=[full_script],  # Wrap in list for batch processing
        voice_samples=[voice_samples],  # Wrap in list for batch processing
        padding=True,
        return_tensors="pt",
        return_attention_mask=True,
    )
    print(f"Starting generation with cfg_scale: {args.cfg_scale}")

    # Generate audio
    start_time = time.time()
    outputs = model.generate(
        **inputs,
        max_new_tokens=None,
        cfg_scale=args.cfg_scale,
        tokenizer=processor.tokenizer,
        # generation_config={'do_sample': False, 'temperature': 0.95, 'top_p': 0.95, 'top_k': 0},
        generation_config={'do_sample': False},
        verbose=True,
    )
    generation_time = time.time() - start_time
    print(f"Generation time: {generation_time:.2f} seconds")

    # Calculate audio duration and additional metrics
    if outputs.speech_outputs and outputs.speech_outputs[0] is not None:
        # Assuming 24kHz sample rate (common for speech synthesis)
        sample_rate = 24000
        audio_samples = outputs.speech_outputs[0].shape[-1] if len(outputs.speech_outputs[0].shape) > 0 else len(outputs.speech_outputs[0])
        audio_duration = audio_samples / sample_rate
        rtf = generation_time / audio_duration if audio_duration > 0 else float('inf')
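        # RTF < 1 means the audio was generated faster than real time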

        print(f"Generated audio duration: {audio_duration:.2f} seconds")
        print(f"RTF (Real Time Factor): {rtf:.2f}x")
    else:
        print("No audio output generated")

    # Calculate token metrics
    input_tokens = inputs['input_ids'].shape[1]  # Number of input tokens
    output_tokens = outputs.sequences.shape[1]  # Total tokens (input + generated)
    generated_tokens = output_tokens - input_tokens

    print(f"Prefilling tokens: {input_tokens}")
    print(f"Generated tokens: {generated_tokens}")
    print(f"Total tokens: {output_tokens}")

    # Save output
    txt_filename = os.path.splitext(os.path.basename(args.txt_path))[0]
    output_path = os.path.join(args.output_dir, f"{txt_filename}_generated.wav")
    os.makedirs(args.output_dir, exist_ok=True)

    processor.save_audio(
        outputs.speech_outputs[0],  # First (and only) batch item
        output_path=output_path,
    )
    print(f"Saved output to {output_path}")

    # Print summary
    print("\n" + "="*50)
    print("GENERATION SUMMARY")
    print("="*50)
    print(f"Input file: {args.txt_path}")
    print(f"Output file: {output_path}")
    print(f"Speaker names: {args.speaker_names}")
    print(f"Number of unique speakers: {len(set(speaker_numbers))}")
    print(f"Number of segments: {len(scripts)}")
    print(f"Prefilling tokens: {input_tokens}")
    print(f"Generated tokens: {generated_tokens}")
    print(f"Total tokens: {output_tokens}")
    print(f"Generation time: {generation_time:.2f} seconds")
    print(f"Audio duration: {audio_duration:.2f} seconds")
    print(f"RTF (Real Time Factor): {rtf:.2f}x")

    print("="*50)


if __name__ == "__main__":
    main()
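
For reference, the transcripts this script consumed follow the "Speaker N:" format parsed above; a minimal, purely illustrative input file:

Speaker 1: Welcome to the show.
Speaker 2: Thanks, it's great to be here.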