JaweriaGenAI committed on
Commit
ec2b48a
·
verified ·
1 Parent(s): 0f30cfb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -29
app.py CHANGED
@@ -41,25 +41,30 @@ def load_chat_file(filename):
41
  history = json.load(f)
42
  chat_display = [(m["content"], None) if m["role"] == "user" else (None, m["content"]) for m in history]
43
  return chat_display, history
44
- except:
 
45
  return [], []
46
 
47
  # Chat handler
48
  def chat_with_groq(message, history):
49
- messages = [{"role": "system", "content": "You are Neobot – a helpful, professional assistant."}]
50
- messages += history + [{"role": "user", "content": message}]
 
51
 
52
- headers = {"Authorization": f"Bearer {groq_key}", "Content-Type": "application/json"}
53
- payload = {"model": "llama3-70b-8192", "messages": messages}
54
 
55
- res = requests.post("https://api.groq.com/openai/v1/chat/completions", headers=headers, json=payload)
56
- reply = res.json()["choices"][0]["message"]["content"]
57
- reply = filter_emojis(reply)
58
 
59
- history += [{"role": "user", "content": message}, {"role": "assistant", "content": reply}]
60
- save_chat_auto(history)
61
- chat_display = [(m["content"], None) if m["role"] == "user" else (None, m["content"]) for m in history]
62
- return "", chat_display, history, list_saved_chats()
 
 
 
63
 
64
  # Transcribe audio
65
  def transcribe_audio(audio_path):
@@ -89,7 +94,7 @@ textarea, input[type='text'] { border: 1px solid #ccc; border-radius: 20px; padd
89
  with gr.Column(scale=1):
90
  gr.Markdown("<h2 style='text-align:center;'>Chats</h2>")
91
  new_chat = gr.Button("πŸ†• New Chat")
92
- saved_chats_list = gr.Column(elem_id="chat_list")
93
 
94
  with gr.Column(scale=3):
95
  gr.Markdown("<h1 style='text-align:center;'>Neobot</h1>")
@@ -98,35 +103,35 @@ textarea, input[type='text'] { border: 1px solid #ccc; border-radius: 20px; padd
98
  chat_input = gr.Textbox(placeholder="Type or speak here...", scale=8, show_label=False)
99
  plus = gr.Button("βž•")
100
  mic = gr.Button("πŸŽ™οΈ")
101
- hidden_audio = gr.Audio(type="filepath", visible=False)
102
  file_input = gr.File(file_types=[".mp3", ".wav"], visible=False)
103
 
104
  send = gr.Button("Send πŸš€")
105
- send.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state, saved_chats_list])
106
- chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state, saved_chats_list])
107
 
108
  def refresh_chat_list():
109
- chats = list_saved_chats()
110
- return [gr.Button(value=chat, scale=1) for chat in chats]
111
-
112
- def load_chat_btn(filename):
113
- return load_chat_file(filename)
 
 
114
 
115
  def start_stop_recording(is_recording):
116
  if not is_recording:
117
- return "Recording...", True, gr.update(visible=True)
118
  else:
119
- return "", False, gr.update(visible=False)
120
 
121
- def handle_audio(audio):
122
- return transcribe_audio(audio), gr.update(visible=False)
123
 
124
  new_chat.click(lambda: ("", [], []), outputs=[chat_input, chatbot, state])
125
  plus.click(lambda: gr.update(visible=True), outputs=[file_input])
126
  file_input.change(transcribe_audio, inputs=file_input, outputs=chat_input)
127
- mic.click(start_stop_recording, inputs=recording, outputs=[chat_input, recording, hidden_audio])
128
- hidden_audio.change(handle_audio, inputs=hidden_audio, outputs=[chat_input, hidden_audio])
129
 
130
- demo.load(refresh_chat_list, outputs=[saved_chats_list])
131
 
132
- demo.launch()
 
41
  history = json.load(f)
42
  chat_display = [(m["content"], None) if m["role"] == "user" else (None, m["content"]) for m in history]
43
  return chat_display, history
44
+ except Exception as e:
45
+ print(f"Load error: {e}")
46
  return [], []
47
 
48
  # Chat handler
49
def chat_with_groq(message, history):
    """Send the running conversation plus a new user message to the Groq chat API.

    Parameters:
        message: the new user message text from the input box.
        history: list of {"role", "content"} dicts accumulated so far
                 (mutated in place with the new turn on success).

    Returns a 4-tuple matching the Gradio outputs:
        ("" to clear the textbox, chat display pairs, updated history,
         refreshed saved-chat button list).
    """
    try:
        # System prompt anchors the assistant persona on every request.
        messages = [{"role": "system", "content": "You are Neobot – a helpful, professional assistant."}]
        messages += history + [{"role": "user", "content": message}]

        headers = {"Authorization": f"Bearer {groq_key}", "Content-Type": "application/json"}
        payload = {"model": "llama3-70b-8192", "messages": messages}

        # timeout= keeps a stalled request from hanging the UI forever;
        # raise_for_status() surfaces HTTP errors before JSON parsing.
        res = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60,
        )
        res.raise_for_status()
        reply = res.json()["choices"][0]["message"]["content"]
        reply = filter_emojis(reply)

        history += [{"role": "user", "content": message}, {"role": "assistant", "content": reply}]
        save_chat_auto(history)
        chat_display = [(m["content"], None) if m["role"] == "user" else (None, m["content"]) for m in history]
        return "", chat_display, history, refresh_chat_list()
    except Exception as e:
        # Boundary handler for the UI callback: log and degrade gracefully.
        print(f"Chat error: {e}")
        # Keep the conversation so far visible instead of discarding it.
        chat_display = [(m["content"], None) if m["role"] == "user" else (None, m["content"]) for m in history]
        chat_display.append((None, "❌ Error occurred while responding."))
        return "", chat_display, history, refresh_chat_list()
68
 
69
  # Transcribe audio
70
  def transcribe_audio(audio_path):
 
94
  with gr.Column(scale=1):
95
  gr.Markdown("<h2 style='text-align:center;'>Chats</h2>")
96
  new_chat = gr.Button("πŸ†• New Chat")
97
+ saved_chats_container = gr.Column(elem_id="chat_list")
98
 
99
  with gr.Column(scale=3):
100
  gr.Markdown("<h1 style='text-align:center;'>Neobot</h1>")
 
103
  chat_input = gr.Textbox(placeholder="Type or speak here...", scale=8, show_label=False)
104
  plus = gr.Button("βž•")
105
  mic = gr.Button("πŸŽ™οΈ")
 
106
  file_input = gr.File(file_types=[".mp3", ".wav"], visible=False)
107
 
108
  send = gr.Button("Send πŸš€")
109
+ send.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state, saved_chats_container])
110
+ chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state, saved_chats_container])
111
 
112
def refresh_chat_list():
    """Rebuild the sidebar: one button per saved chat file.

    Clicking a button loads that chat's display and history into the
    chatbot and state components.
    """
    buttons = []
    for name in list_saved_chats():
        # Drop the trailing ".json" for the button label.
        button = gr.Button(value=name[:-5], scale=1)
        button.click(fn=load_chat_file, inputs=[gr.State(name)], outputs=[chatbot, state])
        buttons.append(button)
    return buttons
120
 
121
def start_stop_recording(is_recording):
    """Toggle mic recording state.

    Returns (textbox_text, new_recording_flag): starting shows a
    "Recording..." hint and sets the flag; stopping clears both.
    """
    if is_recording:
        # Currently recording -> stop: clear the hint, flip the flag off.
        return "", False
    # Not recording -> start: show the hint, flip the flag on.
    return "Recording...", True
126
 
127
def handle_audio_transcription(audio):
    """Adapter for the audio event: transcribe the clip into chat-input text."""
    text = transcribe_audio(audio)
    return text
129
 
130
  new_chat.click(lambda: ("", [], []), outputs=[chat_input, chatbot, state])
131
  plus.click(lambda: gr.update(visible=True), outputs=[file_input])
132
  file_input.change(transcribe_audio, inputs=file_input, outputs=chat_input)
133
+ mic.click(start_stop_recording, inputs=recording, outputs=[chat_input, recording])
 
134
 
135
+ demo.load(refresh_chat_list, outputs=[saved_chats_container])
136
 
137
+ demo.launch()