shukdevdattaEX committed
Commit e841bb9 · verified · 1 Parent(s): 765ede7

Update app.py

Files changed (1)
  1. app.py +49 -33
app.py CHANGED
@@ -22,18 +22,19 @@ class MultimodalChatbot:
             base_url="https://openrouter.ai/api/v1",
             api_key=api_key,
         )
-        self.model = "google/gemma-3n-e2b-it:free"
+        self.model = "google/gemma-2-9b-it:free"  # Updated to a valid text model
         self.conversation_history = []
         # Initialize the pipeline for image-text-to-text processing
        try:
             self.pipe = pipeline(
-                "image-text-to-text",
-                model="google/gemma-3n-e2b",
+                "image-captioning",
+                model="Salesforce/blip-image-captioning-base",
                 device="cpu",  # Optimized for CPU in HF Spaces
                 torch_dtype=torch.float32,  # Use float32 for CPU compatibility
             )
+            print("Image captioning pipeline initialized successfully")
         except Exception as e:
-            print(f"Error initializing pipeline: {e}")
+            print(f"Error initializing image captioning pipeline: {str(e)}")
             self.pipe = None
 
     def encode_image_to_base64(self, image) -> str:
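For context, BLIP captioning checkpoints are normally loaded through the `image-to-text` task in `transformers`; the `"image-captioning"` task string introduced above is not a registered pipeline task and may raise a `KeyError`. A minimal sketch of the conventional loading call (an assumption, not part of this commit):

```python
# Minimal sketch, assuming a current transformers release: BLIP captioning
# models are served by the registered "image-to-text" pipeline task. The
# "image-captioning" string used in the commit is likely to fail lookup.
import torch
from transformers import pipeline

pipe = pipeline(
    "image-to-text",                               # registered task for BLIP captioning
    model="Salesforce/blip-image-captioning-base",
    device="cpu",                                  # CPU-only, as in the Space
    torch_dtype=torch.float32,
)
```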
@@ -130,21 +131,28 @@ class MultimodalChatbot:
 
             cap = cv2.VideoCapture(video_path)
             if not cap.isOpened():
-                return None
+                return None, "Could not open video file"
 
             total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+            if total_frames <= 0:
+                cap.release()
+                return None, "Video has no frames"
+
             if frame_number is None:
                 frame_number = total_frames // 2  # Extract middle frame
+            if frame_number >= total_frames:
+                frame_number = total_frames - 1
+
             cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
             ret, frame = cap.read()
             cap.release()
             if ret:
                 frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-                return Image.fromarray(frame)
+                return Image.fromarray(frame), f"Extracted frame {frame_number} of {total_frames}"
             else:
-                return None
+                return None, "Failed to extract frame"
         except Exception as e:
-            return None
+            return None, f"Error extracting video frame: {str(e)}"
 
     def create_multimodal_message(self,
                                   text_input: str = "",
@@ -175,11 +183,13 @@ class MultimodalChatbot:
                     image = Image.open(image_file)
                 else:
                     image = image_file
-                # Use user's text input as prompt, or default if none provided
-                prompt = f"<image_soft_token> {text_input}" if text_input else "<image_soft_token> Describe this image"
-                output = self.pipe(image, text=prompt)
-                description = output[0]['generated_text']
-                content_parts.append({"type": "text", "text": f"Image analysis: {description}"})
+                # Use BLIP model for image captioning
+                output = self.pipe(image)
+                description = output[0]['generated_caption']
+                if text_input:
+                    content_parts.append({"type": "text", "text": f"Image analysis (based on '{text_input}'): {description}"})
+                else:
+                    content_parts.append({"type": "text", "text": f"Image analysis: {description}"})
                 processing_info.append("🖼️ Image analyzed")
             except Exception as e:
                 content_parts.append({"type": "text", "text": f"Error analyzing image: {str(e)}"})
@@ -189,20 +199,21 @@ class MultimodalChatbot:
             processing_info.append("🖼️ Image received (analysis failed)")
 
         if video_file is not None and self.pipe is not None:
-            frame = self.extract_video_frame(video_file)
+            frame, frame_info = self.extract_video_frame(video_file)
             if frame:
                 try:
-                    # Use user's text input with context, or default for frame
-                    prompt = f"<image_soft_token> This is a frame from the video. {text_input}" if text_input else "<image_soft_token> Describe this frame from the video"
-                    output = self.pipe(frame, text=prompt)
-                    description = output[0]['generated_text']
-                    content_parts.append({"type": "text", "text": f"Video frame analysis: {description}. Please describe the video for further assistance."})
+                    output = self.pipe(frame)
+                    description = output[0]['generated_caption']
+                    if text_input:
+                        content_parts.append({"type": "text", "text": f"Video frame analysis (based on '{text_input}'): {description}. Frame info: {frame_info}. Please describe the video for further assistance."})
+                    else:
+                        content_parts.append({"type": "text", "text": f"Video frame analysis: {description}. Frame info: {frame_info}. Please describe the video for further assistance."})
                     processing_info.append("🎥 Video frame analyzed")
                 except Exception as e:
-                    content_parts.append({"type": "text", "text": f"Error analyzing video frame: {str(e)}"})
+                    content_parts.append({"type": "text", "text": f"Error analyzing video frame: {str(e)}. Frame info: {frame_info}"})
                     processing_info.append("🎥 Video frame analysis failed")
             else:
-                content_parts.append({"type": "text", "text": "Could not extract frame from video. Please describe the video."})
+                content_parts.append({"type": "text", "text": f"Could not extract frame from video: {frame_info}. Please describe the video."})
                 processing_info.append("🎥 Video processing failed")
         elif video_file is not None:
             content_parts.append({"type": "text", "text": "Video uploaded. Analysis failed due to model initialization error."})
@@ -246,7 +257,7 @@ class MultimodalChatbot:
             completion = self.client.chat.completions.create(
                 extra_headers={
                     "HTTP-Referer": "https://multimodal-chatbot.local",
-                    "X-Title": "Multimodal Chatbot",
+                    "X-Title": "Multimodal Chatbot with BLIP and Gemma",
                 },
                 model=self.model,
                 messages=messages,
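The call above follows the standard OpenRouter pattern: the OpenAI SDK forwards `extra_headers` verbatim, and OpenRouter reads `HTTP-Referer` and `X-Title` for app attribution. A self-contained sketch (the API key is a placeholder):

```python
# Minimal OpenRouter chat call via the OpenAI SDK; key is a placeholder.
from openai import OpenAI

client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key="sk-or-...")
completion = client.chat.completions.create(
    extra_headers={
        "HTTP-Referer": "https://multimodal-chatbot.local",  # attribution headers
        "X-Title": "Multimodal Chatbot with BLIP and Gemma",
    },
    model="google/gemma-2-9b-it:free",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(completion.choices[0].message.content)
```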
@@ -264,16 +275,16 @@
 
 def create_interface():
     """Create the Gradio interface"""
-    with gr.Blocks(title="Multimodal Chatbot with Gemma 3n", theme=gr.themes.Soft()) as demo:
+    with gr.Blocks(title="Multimodal Chatbot with BLIP and Gemma", theme=gr.themes.Soft()) as demo:
         gr.Markdown("""
-        # 🤖 Multimodal Chatbot with Gemma 3n
+        # 🤖 Multimodal Chatbot with BLIP and Gemma
 
         This chatbot can process multiple types of input:
-        - **Text**: Regular text messages
+        - **Text**: Regular text messages using Gemma
         - **PDF**: Extract and analyze document content
         - **Audio**: Transcribe speech to text (supports WAV, MP3, M4A, FLAC)
-        - **Images**: Upload images for analysis using Gemma 3n
-        - **Video**: Upload videos for basic frame analysis using Gemma 3n
+        - **Images**: Upload images for analysis using BLIP
+        - **Video**: Upload videos for basic frame analysis using BLIP
 
         **Setup**: Enter your OpenRouter API key below to get started
         """)
@@ -514,14 +525,18 @@ def create_interface():
         api_key_input.change(
             validate_api_key,
             inputs=[api_key_input],
             outputs=[api_status, text_submit_btn, pdf_submit_btn, audio_submit_btn,
                      image_submit_btn, video_submit_btn, combined_submit_btn]
         )
 
         text_submit_btn.click(
             process_text_input,
             inputs=[api_key_input, text_input, text_chatbot],
             outputs=[text_chatbot, text_input]
+        )
+
+        pdf_submit_btn.click(
+            process_pdf_input,
+            inputs=[api_key_input, pdf_input, pdf_text_input, pdf_chatbot],
+            outputs=[pdf_chatbot, pdf_text_input]
         )
         text_input.submit(
             process_text_input,
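This hunk repairs the Gradio event wiring: each button's `.click()` (and each Textbox's `.submit()`) maps a handler onto input and output components. A hedged, self-contained sketch of the pattern (component names mirror the app, but the handler body is a stand-in):

```python
# Sketch of the Gradio wiring pattern; the handler echoes instead of calling
# the real model so the example runs standalone.
import gradio as gr

def process_text_input(api_key, text, history):
    history = history + [(text, f"echo: {text}")]  # stand-in for the model call
    return history, ""  # updated chat history, cleared textbox

with gr.Blocks() as demo:
    api_key_input = gr.Textbox(label="API key")
    text_chatbot = gr.Chatbot()
    text_input = gr.Textbox(label="Message")
    text_submit_btn = gr.Button("Send")

    text_submit_btn.click(
        process_text_input,
        inputs=[api_key_input, text_input, text_chatbot],
        outputs=[text_chatbot, text_input],
    )
    text_input.submit(
        process_text_input,
        inputs=[api_key_input, text_input, text_chatbot],
        outputs=[text_chatbot, text_input],
    )
```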
@@ -571,7 +586,7 @@ def create_interface():
         gr.Markdown("""
         ### 🎯 How to Use Each Tab:
 
-        **💬 Text Chat**: Simple text conversations with the AI
+        **💬 Text Chat**: Simple text conversations with the AI using Gemma
 
         **📄 PDF Chat**: Upload a PDF and ask questions about its content
 
@@ -579,10 +594,10 @@ def create_interface():
         - Supports: WAV, MP3, M4A, FLAC, OGG formats
         - Best results with clear speech and minimal background noise
 
-        **🖼️ Image Chat**: Upload images for analysis using Gemma 3n
+        **🖼️ Image Chat**: Upload images for analysis using BLIP
         - Provide a text prompt to guide the analysis (e.g., "What is in this image?")
 
-        **🎥 Video Chat**: Upload videos for basic frame analysis using Gemma 3n
+        **🎥 Video Chat**: Upload videos for basic frame analysis using BLIP
         - Analysis is based on a single frame; provide a text description for full video context
 
         **🌟 Combined Chat**: Use multiple input types together for comprehensive analysis
@@ -598,6 +613,7 @@ def create_interface():
         - Image and video analysis may be slow on CPU in Hugging Face Spaces
         - Video analysis is limited to a single frame due to CPU constraints
         - Large files may take longer to process
+        - BLIP model may provide basic captions; detailed video descriptions require additional user input
         """)
 
     return demo
@@ -616,7 +632,7 @@ if __name__ == "__main__":
         "torch"
     ]
 
-    print("🚀 Multimodal Chatbot with Gemma 3n")
+    print("🚀 Multimodal Chatbot with BLIP and Gemma")
     print("=" * 50)
     print("Required packages:", ", ".join(required_packages))
     print("\n📦 To install: pip install " + " ".join(required_packages))
 