ASG Models committed
Commit 973c041 · verified · 1 Parent(s): 696ac95

Update app.py

Files changed (1): app.py (+110, -1)
app.py CHANGED
@@ -5,6 +5,93 @@ import numpy as np
 import gradio as gr
 import requests
 from genai_chat_ai import AI,create_chat_session
+
+import torch
+from typing import Any, Callable, Optional, Tuple, Union, Iterator
+import numpy as np
+import torch.nn as nn  # Import the missing module
+
+
+
+def _inference_forward_stream(
+    self,
+    input_ids: Optional[torch.Tensor] = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    speaker_embeddings: Optional[torch.Tensor] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    padding_mask: Optional[torch.Tensor] = None,
+    chunk_size: int = 32,  # Chunk size for streaming output
+) -> Iterator[torch.Tensor]:
+    """Generates speech waveforms in a streaming fashion."""
+    if attention_mask is not None:
+        padding_mask = attention_mask.unsqueeze(-1).float()
+    else:
+        padding_mask = torch.ones_like(input_ids).unsqueeze(-1).float()
+
+
+
+    text_encoder_output = self.text_encoder(
+        input_ids=input_ids,
+        padding_mask=padding_mask,
+        attention_mask=attention_mask,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        return_dict=return_dict,
+    )
+    hidden_states = text_encoder_output[0] if not return_dict else text_encoder_output.last_hidden_state
+    hidden_states = hidden_states.transpose(1, 2)
+    input_padding_mask = padding_mask.transpose(1, 2)
+
+    prior_means = text_encoder_output[1] if not return_dict else text_encoder_output.prior_means
+    prior_log_variances = text_encoder_output[2] if not return_dict else text_encoder_output.prior_log_variances
+
+    if self.config.use_stochastic_duration_prediction:
+        log_duration = self.duration_predictor(
+            hidden_states,
+            input_padding_mask,
+            speaker_embeddings,
+            reverse=True,
+            noise_scale=self.noise_scale_duration,
+        )
+    else:
+        log_duration = self.duration_predictor(hidden_states, input_padding_mask, speaker_embeddings)
+
+    length_scale = 1.0 / self.speaking_rate
+    duration = torch.ceil(torch.exp(log_duration) * input_padding_mask * length_scale)
+    predicted_lengths = torch.clamp_min(torch.sum(duration, [1, 2]), 1).long()
+
+
+    # Create a padding mask for the output lengths of shape (batch, 1, max_output_length)
+    indices = torch.arange(predicted_lengths.max(), dtype=predicted_lengths.dtype, device=predicted_lengths.device)
+    output_padding_mask = indices.unsqueeze(0) < predicted_lengths.unsqueeze(1)
+    output_padding_mask = output_padding_mask.unsqueeze(1).to(input_padding_mask.dtype)
+
+    # Reconstruct an attention tensor of shape (batch, 1, out_length, in_length)
+    attn_mask = torch.unsqueeze(input_padding_mask, 2) * torch.unsqueeze(output_padding_mask, -1)
+    batch_size, _, output_length, input_length = attn_mask.shape
+    cum_duration = torch.cumsum(duration, -1).view(batch_size * input_length, 1)
+    indices = torch.arange(output_length, dtype=duration.dtype, device=duration.device)
+    valid_indices = indices.unsqueeze(0) < cum_duration
+    valid_indices = valid_indices.to(attn_mask.dtype).view(batch_size, input_length, output_length)
+    padded_indices = valid_indices - nn.functional.pad(valid_indices, [0, 0, 1, 0, 0, 0])[:, :-1]
+    attn = padded_indices.unsqueeze(1).transpose(2, 3) * attn_mask
+
+    # Expand prior distribution
+    prior_means = torch.matmul(attn.squeeze(1), prior_means).transpose(1, 2)
+    prior_log_variances = torch.matmul(attn.squeeze(1), prior_log_variances).transpose(1, 2)
+
+    prior_latents = prior_means + torch.randn_like(prior_means) * torch.exp(prior_log_variances) * self.noise_scale
+    latents = self.flow(prior_latents, output_padding_mask, speaker_embeddings, reverse=True)
+
+    spectrogram = latents * output_padding_mask
+
+    for i in range(0, spectrogram.size(-1), chunk_size):
+        yield self.decoder(spectrogram[:, :, i : i + chunk_size], speaker_embeddings)
+
+
+
 api_key = os.environ.get("Id_mode_vits")
 headers = {"Authorization": f"Bearer {api_key}"}
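The added _inference_forward_stream closely mirrors the forward pass of the VITS model in transformers, but decodes the predicted spectrogram in chunk_size-frame slices so waveform pieces can be yielded as soon as they are ready instead of only after full synthesis. As a rough sketch of driving it directly, not part of the commit (the facebook/mms-tts-ara checkpoint and the Arabic test sentence are assumptions; app.py loads its own model and tokenizer elsewhere):

import numpy as np
import torch
from transformers import AutoTokenizer, VitsModel

model = VitsModel.from_pretrained("facebook/mms-tts-ara")          # assumed checkpoint
tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-ara")  # assumed checkpoint

inputs = tokenizer("مرحبا بكم", return_tensors="pt")  # "welcome", sample Arabic input
pieces = []
with torch.no_grad():
    for chunk in _inference_forward_stream(
        model,
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        chunk_size=64,
    ):
        # Each chunk is the waveform decoded from 64 spectrogram frames.
        pieces.append(chunk.squeeze().cpu().numpy())

waveform = np.concatenate(pieces)
print(waveform.shape, model.config.sampling_rate)

This chunked decode is what lets the new Gradio tab further down start playback before synthesis of the whole sentence has finished.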
 
 
@@ -30,7 +117,15 @@ def genrate_speech(text,name_model):
         speaker_id=0
     ).waveform.cpu().numpy().reshape(-1)
     return model.config.sampling_rate,wav
-
+
+def generate_audio(text, speaker_id=None):
+    inputs = tokenizer(text, return_tensors="pt")  # .input_ids
+
+    speaker_embeddings = None
+    # torch.cuda.empty_cache()
+    with torch.no_grad():
+        for chunk in _inference_forward_stream(model, input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, speaker_embeddings=speaker_embeddings, chunk_size=64):
+            yield 16000, chunk.squeeze().cpu().numpy()  # .astype(np.int16).tobytes()
 
 def remove_extra_spaces(text):
     """
 
@@ -113,6 +208,20 @@ with gr.Blocks() as demo: # Use gr.Blocks to wrap the entire interface
             inputs=[text_input, model_choices],
             outputs=[ai_audio],
         )
+    with gr.Tab("Live : تحويل النص إلى كلام"):
+        gr.Markdown("## VITS: تحويل النص إلى كلام")
+        with gr.Row():
+            text_input = gr.Textbox(label="أدخل النص هنا")
+            speaker_id_input = gr.Number(label="معرّف المتحدث (اختياري)", interactive=True)
+        generate_button = gr.Button("توليد وتشغيل الصوت")
+
+        audio_player = gr.Audio(label="أ audio", streaming=True)
+
+        # Update the event binding
+        generate_button.click(generate_audio, inputs=[text_input], outputs=audio_player)
+
+
+
 
 if __name__ == "__main__":
     demo.launch()
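For readers who do not read Arabic, the new tab's labels translate roughly as "Live: text to speech", "VITS: text to speech", "Enter the text here", "Speaker ID (optional)", and "Generate and play audio". The tab relies on Gradio's streaming audio output: because generate_audio is a generator and the output gr.Audio is created with streaming=True, each yielded (sample_rate, chunk) tuple can be played as it arrives. A stripped-down sketch of just that wiring, with a sine-wave generator standing in for the model (the sketch is an assumption about minimal usage, not part of the commit):

import numpy as np
import gradio as gr

def tone_stream(text):
    # Placeholder generator: five one-second 16 kHz chunks, mimicking the
    # (sample_rate, np.ndarray) tuples that generate_audio yields.
    sr = 16000
    t = np.arange(sr) / sr
    for i in range(5):
        yield sr, (0.2 * np.sin(2 * np.pi * 220 * (i + 1) * t)).astype(np.float32)

with gr.Blocks() as sketch:
    text_in = gr.Textbox(label="Text")
    audio_out = gr.Audio(label="Audio", streaming=True)
    gr.Button("Play").click(tone_stream, inputs=text_in, outputs=audio_out)

if __name__ == "__main__":
    sketch.launch()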