Spaces:
ASG Models committed on
Update app.py
app.py CHANGED
@@ -167,35 +167,66 @@ def get_answer_ai(text):
    AI = create_chat_session()
    response = AI.send_message(text)
    return response.text
-
-    global chat_history
-
-    chat_history.append((input_text, None))  # append the user's message
-    response_text = get_answer_ai(input_text)
-    response_audio = genrate_speech(response_text, 'asg2024/vits-ar-sa-huba')
-    # elif input_audio:
-    #     pass
-    #     chat_history.append((None, input_audio))  # append the user's voice message
-    #     input_text = convert_speech_to_text(input_audio)
-    #     response_text = model.generate_response(input_text, chat_history)
-    #     response_audio = convert_text_to_speech(response_text)
-
-    chat_history.append((None, gr.Audio(response_audio)))  # append the bot's reply
-
-    return chat_history
+import gradio as gr
+import os
+import plotly.express as px
+
+# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
+
+def random_plot():
+    df = px.data.iris()
+    fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species",
+                     size='petal_length', hover_data=['petal_width'])
+    return fig
+
+def print_like_dislike(x: gr.LikeData):
+    print(x.index, x.value, x.liked)
+
+def add_message(history, message):
+    for x in message["files"]:
+        history.append(((x,), None))
+    if message["text"] is not None:
+        history.append((message["text"], None))
+        response_audio = genrate_speech(message["text"], 'asg2024/vits-ar-sa-huba')
+        history.append((None, gr.Audio(response_audio, scale=1, streaming=True)))
+    return history, gr.MultimodalTextbox(value=None, interactive=False)
+
+def bot(history, message):
+    if message["text"] is not None:
+        txt_ai = get_answer_ai(message["text"])
+        history[-1][1] = txt_ai  # ((None, txt_ai))
+        response_audio = genrate_speech(txt_ai, 'asg2024/vits-ar-sa-huba')
+        history.append((None, gr.Audio(response_audio, scale=1, streaming=True)))
+
+    return history
+
+fig = random_plot()
+
+# a variable to store the chat history

with gr.Blocks() as demo:  # Use gr.Blocks to wrap the entire interface

    with gr.Tab("ChatBot "):
-        chatbot = gr.Chatbot(
+        chatbot = gr.Chatbot(
+            elem_id="chatbot",
+            bubble_full_width=False,
+            scale=1,
+        )
+
+        chat_input = gr.MultimodalTextbox(interactive=True,
+                                          file_count="single",
+                                          placeholder="Enter message or upload file...", show_label=False,)
+
+        chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
+        bot_msg = chat_msg.then(bot, [chatbot, chat_input], chatbot, api_name="bot_response")
+        bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
+
+        chatbot.like(print_like_dislike, None, None)
+
    # audio.change(chatbot_fn, [txt, audio], chatbot)

    with gr.Tab("Chat AI "):
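
For context, the ChatBot tab added above follows Gradio's standard submit/then event chain: one callback records the user turn and locks the input, a second fills in the bot reply, and a third re-enables the textbox. The minimal self-contained sketch below illustrates the same wiring under those assumptions; fake_answer is a hypothetical stand-in for the Space's get_answer_ai, and the genrate_speech text-to-speech step is omitted.

import gradio as gr

# Hypothetical stand-in for the Space's get_answer_ai(); any text-in/text-out function works here.
def fake_answer(text):
    return f"Echo: {text}"

def add_message(history, message):
    # Record uploaded files and typed text as user turns, then lock the textbox.
    for path in message["files"]:
        history.append(((path,), None))
    if message["text"]:
        history.append((message["text"], None))
    return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history):
    # Fill in the assistant side of the most recent user turn (text turns only).
    last_user = history[-1][0]
    if isinstance(last_user, str):
        history[-1][1] = fake_answer(last_user)
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(elem_id="chatbot")
    chat_input = gr.MultimodalTextbox(
        interactive=True,
        file_count="single",
        placeholder="Enter message or upload file...",
        show_label=False,
    )

    # submit -> add the user turn, then generate the reply, then re-enable the textbox.
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

if __name__ == "__main__":
    demo.launch()

Splitting the user-echo and bot-reply steps this way keeps the UI responsive: the user's message appears immediately while the reply is still being generated.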