Futuresony committed on
Commit
d25d7d6
·
verified ·
1 Parent(s): 71ccbcd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -13
app.py CHANGED
@@ -4,24 +4,24 @@ import soundfile as sf
4
  from transformers import pipeline
5
  import torch
6
 
7
- # Initialize the client for the text generation model
8
  client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")
9
 
10
- # Initialize the TTS pipeline from Huggingface
11
- synthesizer = pipeline("text-to-speech", "Futuresony/output")
12
 
13
  def respond(
14
  message,
15
- history: list[tuple[str, str]],
16
  system_message,
17
  max_tokens,
18
  temperature,
19
  top_p,
 
20
  ):
21
- # Prepare the messages for the chatbot
22
  messages = [{"role": "system", "content": system_message}]
23
 
24
- # Add history of previous conversation
25
  for val in history:
26
  if val[0]:
27
  messages.append({"role": "user", "content": val[0]})
@@ -32,7 +32,7 @@ def respond(
32
 
33
  response = ""
34
 
35
- # Generate the response from the model
36
  for message in client.chat_completion(
37
  messages,
38
  max_tokens=max_tokens,
@@ -44,17 +44,16 @@ def respond(
44
  response += token
45
  yield response
46
 
47
- # Convert the generated text to speech
48
  speech = synthesizer(response)
49
 
50
- # Save the generated speech to a file
51
  sf.write("generated_speech.wav", speech["audio"], samplerate=speech["sampling_rate"])
52
 
53
- # Return both the text and the audio for playback
54
  return response, "generated_speech.wav"
55
 
56
-
57
- # Create the Gradio interface with a textbox for the user to input a message
58
  demo = gr.Interface(
59
  fn=respond,
60
  inputs=[
@@ -68,4 +67,4 @@ demo = gr.Interface(
68
  )
69
 
70
  if __name__ == "__main__":
71
- demo.launch()
 
4
  from transformers import pipeline
5
  import torch
6
 
7
+ # Initialize the client for the text generation model.
8
  client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")
9
 
10
+ # Initialize the TTS pipeline from Huggingface.
11
+ synthesizer = pipeline("text-to-speech", model="Futuresony/output")
12
 
13
  def respond(
14
  message,
 
15
  system_message,
16
  max_tokens,
17
  temperature,
18
  top_p,
19
+ history=[]
20
  ):
21
+ # Prepare the messages for the chatbot.
22
  messages = [{"role": "system", "content": system_message}]
23
 
24
+ # Add history of previous conversation.
25
  for val in history:
26
  if val[0]:
27
  messages.append({"role": "user", "content": val[0]})
 
32
 
33
  response = ""
34
 
35
+ # Generate the response from the model.
36
  for message in client.chat_completion(
37
  messages,
38
  max_tokens=max_tokens,
 
44
  response += token
45
  yield response
46
 
47
+ # Convert the generated text to speech.
48
  speech = synthesizer(response)
49
 
50
+ # Save the generated speech to a file.
51
  sf.write("generated_speech.wav", speech["audio"], samplerate=speech["sampling_rate"])
52
 
53
+ # Return both the text and the audio for playback.
54
  return response, "generated_speech.wav"
55
 
56
+ # Create the Gradio interface with a textbox for the user to input a message.
 
57
  demo = gr.Interface(
58
  fn=respond,
59
  inputs=[
 
67
  )
68
 
69
  if __name__ == "__main__":
70
+ demo.launch()