Update app.py
app.py CHANGED
@@ -117,22 +117,22 @@ def transcribe(audio_path):
 
     return text
 
-def model(text, web_search):
+def model(text, web_search, max_tokens, temperature):
     if web_search is True:
         """Performs a web search, feeds the results to a language model, and returns the answer."""
         web_results = search(text)
         web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
         formatted_prompt = system_instructions1 + text + "[WEB]" + str(web2) + "[OpenGPT 4o]"
-        stream = client1.text_generation(formatted_prompt, max_new_tokens=
+        stream = client1.text_generation(formatted_prompt, max_new_tokens=max_tokens, temperature=temperature, stream=True, details=True, return_full_text=False)
         return "".join([response.token.text for response in stream if response.token.text != "</s>"])
     else:
         formatted_prompt = system_instructions1 + text + "[OpenGPT 4o]"
-        stream = client1.text_generation(formatted_prompt, max_new_tokens=
+        stream = client1.text_generation(formatted_prompt, max_new_tokens=max_tokens, temperature=temperature, stream=True, details=True, return_full_text=False)
         return "".join([response.token.text for response in stream if response.token.text != "</s>"])
 
-async def respond(audio, web_search, voice):
+async def respond(audio, web_search, voice, max_tokens, temperature):
     user = transcribe(audio)
-    reply = model(user, web_search)
+    reply = model(user, web_search, max_tokens, temperature)
     communicate = edge_tts.Communicate(reply, voice=voice)
     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
         tmp_path = tmp_file.name
@@ -151,7 +151,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     audio_input = gr.Audio(label="Speak or Upload Audio", sources="microphone", type="filepath")
     web_search = gr.Checkbox(label="Enable Web Search", value=False)
     voice = gr.Dropdown(label="Choose Voice", choices=voices, value="en-US-JennyNeural")
-
+    max_tokens = gr.Slider(minimum=50, maximum=500, value=300, label="Max Tokens")
+    temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
     with gr.Column():
         audio_output = gr.Audio(label="AI Response", autoplay=True)
         user_text = gr.Textbox(label="You Said", interactive=False)
@@ -162,8 +163,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
     with gr.TabItem("Settings"):
         gr.Markdown("### Settings")
-        max_tokens = gr.Slider(minimum=50, maximum=500, value=300, label="Max Tokens")
-        temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
         gr.Markdown("Adjust the parameters to customize the AI's behavior.")
 
     # Store conversation history
@@ -173,9 +172,10 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         conversation_history.append([user_input, ai_response])
         return conversation_history
 
-
+    # Automatically submit when audio is detected
+    audio_input.change(
         fn=respond,
-        inputs=[audio_input, web_search, voice],
+        inputs=[audio_input, web_search, voice, max_tokens, temperature],
         outputs=[audio_output, user_text, ai_text]
     ).then(
         fn=update_history,
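
For reference, the streaming call these changes parameterize is `InferenceClient.text_generation` from `huggingface_hub` with `stream=True` and `details=True`, which yields per-token objects whose `.token.text` fields are joined into the final reply. Below is a minimal, self-contained sketch of that pattern; the model id and function name are illustrative placeholders rather than values taken from app.py, while `max_tokens` and `temperature` mirror the slider values the commit wires through `respond` into `model`.

# Minimal sketch of the streaming text-generation pattern used above.
# Assumption: the model id is a placeholder, not necessarily the repo
# that app.py's client1 points at.
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")  # placeholder model id

def generate(prompt: str, max_tokens: int = 300, temperature: float = 0.7) -> str:
    # With stream=True and details=True, text_generation yields token objects
    # as they are produced instead of returning one string at the end.
    stream = client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        stream=True,
        details=True,
        return_full_text=False,
    )
    # Concatenate the token texts, dropping the end-of-sequence marker,
    # the same way the diff builds the model reply.
    return "".join(
        chunk.token.text for chunk in stream if chunk.token.text != "</s>"
    )

On the Gradio side, the two `gr.Slider` components are simply appended to the `inputs` list of `audio_input.change(...)`, so their current values are passed as the `max_tokens` and `temperature` arguments of `respond` each time new audio arrives.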