bluenevus committed on
Commit f6979f1 · 1 Parent(s): 90100cc

Update app.py via AI Editor

Files changed (1)
  1. app.py +89 -58
app.py CHANGED
@@ -46,8 +46,9 @@ def get_session_state(session_id):
         SESSION_DATA[session_id] = {
             "messages": [],
             "uploads": [],
-            "openai_file_ids": [],
-            "created": datetime.datetime.utcnow().isoformat()
+            "created": datetime.datetime.utcnow().isoformat(),
+            "streaming": False,
+            "stream_buffer": "",
         }
     return SESSION_DATA[session_id]

@@ -153,7 +154,8 @@ def right_main_static():
             html.Div(id="error-message", style={"color": "#bb2124", "marginTop": "0.5rem"}),
         ])
     ], className="mt-3"),
-    dcc.Loading(id="loading", type="default", fullscreen=False, style={"position": "absolute", "top": "5%", "left": "50%"})
+    dcc.Loading(id="loading", type="default", fullscreen=False, style={"position": "absolute", "top": "5%", "left": "50%"}),
+    dcc.Interval(id="stream-interval", interval=400, n_intervals=0, disabled=True, max_intervals=1000)
 ], style={"padding": "1rem", "backgroundColor": "#fff", "height": "100vh", "overflowY": "auto"})

 app.layout = html.Div([
@@ -165,25 +167,8 @@ app.layout = html.Div([
     ], style={"display": "flex"})
 ])

-def _upload_file_to_openai(file_path, purpose="assistants"):
-    try:
-        with open(file_path, 'rb') as f:
-            res = openai.File.create(
-                file=f,
-                purpose=purpose
-            )
-        logger.info(f"Uploaded file to OpenAI: {res.id}")
-        return res.id
-    except Exception as e:
-        logger.error(f"Failed to upload file to OpenAI: {e}")
-        return None
-
-def _get_openai_file_ids(session_state):
-    return session_state.get("openai_file_ids", [])
-
 def _is_supported_doc(filename):
     ext = os.path.splitext(filename)[1].lower()
-    # OpenAI supports: txt, pdf, docx, md for assistants file search
     return ext in [".txt", ".pdf", ".md", ".docx"]

 @app.callback(
@@ -203,14 +188,17 @@ def assign_session_id(_):
     Output("chat-history-list", "children"),
     Output("chat-window", "children"),
     Output("error-message", "children"),
+    Output("stream-interval", "disabled"),
+    Output("stream-interval", "n_intervals"),
     Input("session-id", "data"),
     Input("send-btn", "n_clicks"),
     Input("file-upload", "contents"),
     State("file-upload", "filename"),
     State("user-input", "value"),
+    State("stream-interval", "n_intervals"),
     prevent_initial_call=False
 )
-def main_callback(session_id, send_clicks, file_contents, file_names, user_input):
+def main_callback(session_id, send_clicks, file_contents, file_names, user_input, stream_n):
     trigger = callback_context.triggered[0]['prop_id'].split('.')[0] if callback_context.triggered else ""
     if not session_id:
         session_id = get_session_id()
@@ -219,6 +207,7 @@ def main_callback(session_id, send_clicks, file_contents, file_names, user_input
     load_session_state(session_id)
     state = get_session_state(session_id)
     error = ""
+    start_streaming = False

     if trigger == "file-upload" and file_contents and file_names:
         uploads = []
@@ -235,64 +224,106 @@ def main_callback(session_id, send_clicks, file_contents, file_names, user_input
             with open(fp, "wb") as f:
                 f.write(base64.b64decode(data))
             uploads.append({"name": fname, "is_img": is_img, "path": fp})
-            # If document is supported, upload to OpenAI
-            if _is_supported_doc(fname):
-                file_id = _upload_file_to_openai(fp)
-                if file_id:
-                    if "openai_file_ids" not in state:
-                        state["openai_file_ids"] = []
-                    state["openai_file_ids"].append(file_id)
         state["uploads"].extend(uploads)
         save_session_state(session_id)
         logger.info(f"Session {session_id}: Uploaded files {[u['name'] for u in uploads]}")

     if trigger == "send-btn" and user_input and user_input.strip():
         state["messages"].append({"role": "user", "content": user_input})
-        try:
-            file_ids = _get_openai_file_ids(state)
-            system_prompt = load_system_prompt()
-            messages = [
-                {"role": "system", "content": system_prompt},
-            ]
-            for m in state["messages"]:
-                messages.append({"role": m["role"], "content": m["content"]})
-            if file_ids:
-                response = openai.ChatCompletion.create(
-                    model="gpt-3.5-turbo-1106",
-                    messages=messages,
-                    tools=[{"type": "file_search"}],
-                    tool_choice="file_search",
-                    file_ids=file_ids,
-                    max_tokens=700,
-                    temperature=0.2,
-                )
-            else:
+        state["streaming"] = True
+        state["stream_buffer"] = ""
+        save_session_state(session_id)
+
+        def run_stream(session_id, messages):
+            try:
+                system_prompt = load_system_prompt()
+                msg_list = [{"role": "system", "content": system_prompt}]
+                for m in messages:
+                    msg_list.append({"role": m["role"], "content": m["content"]})
                 response = openai.ChatCompletion.create(
                     model="gpt-3.5-turbo",
-                    messages=messages,
+                    messages=msg_list,
                     max_tokens=700,
                     temperature=0.2,
+                    stream=True,
                 )
-            reply = response.choices[0].message.content
-            state["messages"].append({"role": "assistant", "content": reply})
-            logger.info(f"Session {session_id}: User: {user_input} | Assistant: {reply}")
-            error = ""
-        except Exception as e:
-            error = f"Error: {e}"
-            logger.error(f"Session {session_id}: {error}")
-        save_session_state(session_id)
+                reply = ""
+                for chunk in response:
+                    delta = chunk["choices"][0]["delta"]
+                    content = delta.get("content", "")
+                    if content:
+                        reply += content
+                        # Update buffer in session state
+                        session_lock = get_session_lock(session_id)
+                        with session_lock:
+                            load_session_state(session_id)
+                            state = get_session_state(session_id)
+                            state["stream_buffer"] = reply
+                            save_session_state(session_id)
+                # Finalize message
+                session_lock = get_session_lock(session_id)
+                with session_lock:
+                    load_session_state(session_id)
+                    state = get_session_state(session_id)
+                    state["messages"].append({"role": "assistant", "content": reply})
+                    state["stream_buffer"] = ""
+                    state["streaming"] = False
+                    save_session_state(session_id)
+                logger.info(f"Session {session_id}: User: {user_input} | Assistant: {reply}")
+            except Exception as e:
+                session_lock = get_session_lock(session_id)
+                with session_lock:
+                    load_session_state(session_id)
+                    state = get_session_state(session_id)
+                    state["streaming"] = False
+                    state["stream_buffer"] = ""
+                    save_session_state(session_id)
+                logger.error(f"Session {session_id}: Streaming error: {e}")
+
+        threading.Thread(target=run_stream, args=(session_id, list(state["messages"])), daemon=True).start()
+        start_streaming = True

     chat_history = state.get("messages", [])
     uploads = state.get("uploads", [])
     upload_cards = [uploaded_file_card(os.path.basename(f["name"]), f["is_img"]) for f in uploads]
     chat_items = [html.Li(html.Span((msg['role'] + ": " + msg['content'])[:40] + ("..." if len(msg['content']) > 40 else ""), style={"fontSize": "0.92rem"})) for msg in chat_history[-6:]]
     chat_cards = []
-    for msg in chat_history:
+    for i, msg in enumerate(chat_history):
+        if msg['role'] == "user":
+            chat_cards.append(chat_message_card(msg['content'], is_user=True))
+        elif msg['role'] == "assistant":
+            chat_cards.append(chat_message_card(msg['content'], is_user=False))
+    if state.get("streaming", False):
+        # Add a partial assistant message at the end
+        if state.get("stream_buffer", ""):
+            chat_cards.append(chat_message_card(state["stream_buffer"], is_user=False))
+        return upload_cards, chat_items, chat_cards, error, False, 0
+    return upload_cards, chat_items, chat_cards, error, (not state.get("streaming", False)), 0
+
+@app.callback(
+    Output("chat-window", "children"),
+    Output("stream-interval", "disabled"),
+    Input("stream-interval", "n_intervals"),
+    State("session-id", "data"),
+    prevent_initial_call=True
+)
+def poll_stream(n_intervals, session_id):
+    session_lock = get_session_lock(session_id)
+    with session_lock:
+        load_session_state(session_id)
+        state = get_session_state(session_id)
+    chat_history = state.get("messages", [])
+    chat_cards = []
+    for i, msg in enumerate(chat_history):
         if msg['role'] == "user":
             chat_cards.append(chat_message_card(msg['content'], is_user=True))
         elif msg['role'] == "assistant":
             chat_cards.append(chat_message_card(msg['content'], is_user=False))
-    return upload_cards, chat_items, chat_cards, error
+    if state.get("streaming", False):
+        if state.get("stream_buffer", ""):
+            chat_cards.append(chat_message_card(state["stream_buffer"], is_user=False))
+        return chat_cards, False
+    return chat_cards, True

 @app_flask.after_request
 def set_session_cookie(resp):
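
Note on the pattern this commit introduces: main_callback no longer blocks on the completion call; it starts a daemon thread that accumulates streamed tokens into state["stream_buffer"], while the new stream-interval / poll_stream callback re-renders the chat window every 400 ms until streaming finishes. The sketch below shows that producer/poller arrangement in isolation. It is a minimal approximation for reference only: SESSIONS, LOCKS, get_lock and poll are illustrative stand-ins rather than app.py's actual helpers, and the OpenAI stream is simulated with a plain list of chunks.

import threading
import time

# Illustrative stand-ins for the app's per-session storage and locks.
SESSIONS = {}
LOCKS = {}

def get_lock(session_id):
    return LOCKS.setdefault(session_id, threading.Lock())

def run_stream(session_id, chunks):
    """Worker thread: accumulate chunks into the session's stream buffer."""
    reply = ""
    for chunk in chunks:  # stands in for iterating a streamed completion response
        reply += chunk
        with get_lock(session_id):
            SESSIONS[session_id]["stream_buffer"] = reply
        time.sleep(0.05)
    with get_lock(session_id):  # finalize: store the full reply, clear the buffer, stop streaming
        SESSIONS[session_id]["messages"].append({"role": "assistant", "content": reply})
        SESSIONS[session_id]["stream_buffer"] = ""
        SESSIONS[session_id]["streaming"] = False

def poll(session_id):
    """What the dcc.Interval callback does: read the partial or finished reply."""
    with get_lock(session_id):
        state = SESSIONS[session_id]
        return state["stream_buffer"], state["streaming"], list(state["messages"])

if __name__ == "__main__":
    SESSIONS["demo"] = {"messages": [], "stream_buffer": "", "streaming": True}
    threading.Thread(target=run_stream, args=("demo", ["Hel", "lo ", "world"]), daemon=True).start()
    while True:
        buffer, streaming, messages = poll("demo")
        print("partial:", repr(buffer))
        if not streaming:
            print("final:", messages[-1]["content"])
            break
        time.sleep(0.1)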