Nymbo committed
Commit 45b3867 · verified · 1 Parent(s): 57fd5c0

Update app.py

Files changed (1)
  1. app.py +97 -47
app.py CHANGED
@@ -34,7 +34,7 @@ def encode_image(image):
 
 def respond(
     message,
-    images, # New parameter for uploaded images
+    image_files, # Changed parameter name and structure
     history: list[tuple[str, str]],
     system_message,
     max_tokens,
@@ -49,7 +49,7 @@ def respond(
     selected_model
 ):
     print(f"Received message: {message}")
-    print(f"Received {len(images) if images else 0} images")
+    print(f"Received {len(image_files) if image_files else 0} images")
     print(f"History: {history}")
     print(f"System message: {system_message}")
     print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
@@ -77,7 +77,7 @@ def respond(
         seed = None
 
     # Create multimodal content if images are present
-    if images and any(images):
+    if image_files and len(image_files) > 0:
         # Process the user message to include images
         user_content = []
 
@@ -89,16 +89,20 @@ def respond(
             })
 
         # Add image parts
-        for img in images:
+        for img in image_files:
             if img is not None:
-                encoded_image = encode_image(img)
-                if encoded_image:
-                    user_content.append({
-                        "type": "image_url",
-                        "image_url": {
-                            "url": f"data:image/jpeg;base64,{encoded_image}"
-                        }
-                    })
+                # Get raw image data from path
+                try:
+                    encoded_image = encode_image(img)
+                    if encoded_image:
+                        user_content.append({
+                            "type": "image_url",
+                            "image_url": {
+                                "url": f"data:image/jpeg;base64,{encoded_image}"
+                            }
+                        })
+                except Exception as e:
+                    print(f"Error encoding image: {e}")
     else:
         # Text-only message
        user_content = message
@@ -112,8 +116,36 @@ def respond(
             user_part = val[0]
             assistant_part = val[1]
             if user_part:
-                messages.append({"role": "user", "content": user_part})
+                # Handle both text-only and multimodal messages in history
+                if isinstance(user_part, tuple) and len(user_part) == 2:
+                    # This is a multimodal message with text and images
+                    history_content = []
+                    if user_part[0]: # Text
+                        history_content.append({
+                            "type": "text",
+                            "text": user_part[0]
+                        })
+
+                    for img in user_part[1]: # Images
+                        if img:
+                            try:
+                                encoded_img = encode_image(img)
+                                if encoded_img:
+                                    history_content.append({
+                                        "type": "image_url",
+                                        "image_url": {
+                                            "url": f"data:image/jpeg;base64,{encoded_img}"
+                                        }
+                                    })
+                            except Exception as e:
+                                print(f"Error encoding history image: {e}")
+
+                    messages.append({"role": "user", "content": history_content})
+                else:
+                    # Regular text message
+                    messages.append({"role": "user", "content": user_part})
                 print(f"Added user message to context (type: {type(user_part)})")
+
             if assistant_part:
                 messages.append({"role": "assistant", "content": assistant_part})
                 print(f"Added assistant message to context: {assistant_part}")
@@ -190,19 +222,15 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
     print("Chatbot interface created.")
 
     with gr.Row():
-        # Text input for messages
-        msg = gr.Textbox(
-            placeholder="Type a message...",
+        # Multimodal textbox for messages (combines text and file uploads)
+        msg = gr.MultimodalTextbox(
+            placeholder="Type a message or upload images...",
             show_label=False,
             container=False,
-            scale=9
-        )
-
-        # Image upload button
-        image_upload = gr.Image(
-            type="filepath",
-            label="Upload Image",
-            scale=1
+            scale=12,
+            file_types=["image"],
+            file_count="multiple",
+            sources=["upload"]
         )
 
         # Send button for messages
@@ -367,37 +395,55 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         return selected
 
     # Function for the chat interface
-    def user(user_message, image, history):
-        if user_message == "" and image is None:
+    def user(user_message, history):
+        # Skip if message is empty (no text and no files)
+        if (not user_message["text"] or user_message["text"].strip() == "") and not user_message["files"]:
             return history
 
-        # Format image reference for display
-        img_placeholder = ""
-        if image is not None:
-            img_placeholder = f"![Image]({image})"
+        # Process images and text into a display message
+        display_message = ""
 
-        # Combine text and image reference for display
-        display_message = f"{user_message}\n{img_placeholder}" if img_placeholder else user_message
+        # Add text if present
+        if user_message["text"] and user_message["text"].strip() != "":
+            display_message += user_message["text"]
 
-        # Return updated history
-        return history + [[display_message, None]]
+        # Add image references if present
+        file_displays = []
+        for file in user_message["files"]:
+            file_displays.append(file)
+
+        # Return updated history with display message
+        if file_displays:
+            return history + [[(display_message, file_displays), None]]
+        else:
+            return history + [[display_message, None]]
 
     # Define chat interface
-    def bot(history, images, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model):
+    def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model):
         # Extract the last user message
-        user_message = history[-1][0] if history and len(history) > 0 else ""
+        if not history or len(history) == 0:
+            return history
+
+        user_message = history[-1][0]
 
-        # Clean up the user message to remove image reference
-        if "![Image]" in user_message:
-            text_parts = user_message.split("![Image]")[0].strip()
+        # Determine if the message is multimodal or text-only
+        is_multimodal = False
+        text_parts = ""
+        image_files = []
+
+        # Process text and images from the message
+        if isinstance(user_message, tuple):
+            text_parts = user_message[0]
+            image_files = user_message[1]
+            is_multimodal = True
         else:
             text_parts = user_message
 
         # Process message through respond function
         history[-1][1] = ""
         for response in respond(
-            text_parts, # Send only the text part
-            [images], # Send images separately
+            text_parts, # Text part
+            image_files if is_multimodal else None, # Image part
            history[:-1],
            system_msg,
            max_tokens,
@@ -417,32 +463,36 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
    # Event handlers
    msg.submit(
        user,
-        [msg, image_upload, chatbot],
+        [msg, chatbot],
        [chatbot],
        queue=False
    ).then(
        bot,
-        [chatbot, image_upload, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
+        [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
         frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
         model_search_box, featured_model_radio],
        [chatbot]
+    ).then(
+        lambda: {"text": "", "files": []}, # Clear inputs after submission
+        None,
+        [msg]
    )
 
    submit_btn.click(
        user,
-        [msg, image_upload, chatbot],
+        [msg, chatbot],
        [chatbot],
        queue=False
    ).then(
        bot,
-        [chatbot, image_upload, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
+        [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
         frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
         model_search_box, featured_model_radio],
        [chatbot]
    ).then(
-        lambda: (None, "", None), # Clear inputs after submission
+        lambda: {"text": "", "files": []}, # Clear inputs after submission
        None,
-        [msg, msg, image_upload]
+        [msg]
    )
 
    # Connect the model filter to update the radio choices
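
For readers following the change, the short sketch below (not part of the commit) traces how a gr.MultimodalTextbox submission flows through the new user() and bot() helpers before reaching respond(). The payload values are hypothetical examples; the unpacking logic mirrors the diff above.

# Hypothetical payload shape produced by gr.MultimodalTextbox on submit.
payload = {"text": "Describe this photo.", "files": ["/tmp/example.jpg"]}

# user() stores a (text, [files]) tuple when files are attached, else plain text.
history = []
if payload["files"]:
    history.append([(payload["text"], payload["files"]), None])
else:
    history.append([payload["text"], None])

# bot() later unpacks that entry before calling respond(text_parts, image_files, ...).
user_message = history[-1][0]
if isinstance(user_message, tuple):
    text_parts, image_files = user_message
else:
    text_parts, image_files = user_message, None
print(text_parts, image_files)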