saakshigupta committed on
Commit 398e440 · verified · 1 Parent(s): 967c6d0

Update app.py

Files changed (1)
  1. app.py +37 -572
app.py CHANGED
@@ -69,6 +69,9 @@ custom_instruction = st.sidebar.text_area(
69
  help="Add specific instructions for the LLM analysis"
70
  )
71
 
72
  # About section in sidebar
73
  st.sidebar.markdown("---")
74
  st.sidebar.subheader("About")
@@ -88,337 +91,12 @@ The system looks for:
88
  - Blending problems
89
  """)
90
 
91
- # ----- GradCAM Implementation -----
92
-
93
- class ImageDataset(torch.utils.data.Dataset):
94
- def __init__(self, image, transform=None, face_only=True, dataset_name=None):
95
- self.image = image
96
- self.transform = transform
97
- self.face_only = face_only
98
- self.dataset_name = dataset_name
99
- # Load face detector
100
- self.face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
101
-
102
- def __len__(self):
103
- return 1 # Only one image
104
-
105
- def detect_face(self, image_np):
106
- """Detect face in image and return the face region"""
107
- gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
108
- faces = self.face_detector.detectMultiScale(gray, 1.1, 5)
109
-
110
- # If no face is detected, use the whole image
111
- if len(faces) == 0:
112
- st.info("No face detected, using whole image for analysis")
113
- h, w = image_np.shape[:2]
114
- return (0, 0, w, h), image_np
115
-
116
- # Get the largest face
117
- if len(faces) > 1:
118
- # Choose the largest face by area
119
- areas = [w*h for (x, y, w, h) in faces]
120
- largest_idx = np.argmax(areas)
121
- x, y, w, h = faces[largest_idx]
122
- else:
123
- x, y, w, h = faces[0]
124
-
125
- # Add padding around the face (5% on each side)
126
- padding_x = int(w * 0.05)
127
- padding_y = int(h * 0.05)
128
-
129
- # Ensure padding doesn't go outside image bounds
130
- x1 = max(0, x - padding_x)
131
- y1 = max(0, y - padding_y)
132
- x2 = min(image_np.shape[1], x + w + padding_x)
133
- y2 = min(image_np.shape[0], y + h + padding_y)
134
-
135
- # Extract the face region
136
- face_img = image_np[y1:y2, x1:x2]
137
-
138
- return (x1, y1, x2-x1, y2-y1), face_img
139
-
140
- def __getitem__(self, idx):
141
- image_np = np.array(self.image)
142
- label = 0 # Default label; will be overridden by prediction
143
-
144
- # Store original image for visualization
145
- original_image = self.image.copy()
146
-
147
- # Detect face if required
148
- if self.face_only:
149
- face_box, face_img_np = self.detect_face(image_np)
150
- face_img = Image.fromarray(face_img_np)
151
-
152
- # Apply transform to face image
153
- if self.transform:
154
- face_tensor = self.transform(face_img)
155
- else:
156
- face_tensor = transforms.ToTensor()(face_img)
157
-
158
- return face_tensor, label, "uploaded_image", original_image, face_box, self.dataset_name
159
- else:
160
- # Process the whole image
161
- if self.transform:
162
- image_tensor = self.transform(self.image)
163
- else:
164
- image_tensor = transforms.ToTensor()(self.image)
165
-
166
- return image_tensor, label, "uploaded_image", original_image, None, self.dataset_name
167
-
168
- class GradCAM:
169
- def __init__(self, model, target_layer):
170
- self.model = model
171
- self.target_layer = target_layer
172
- self.gradients = None
173
- self.activations = None
174
- self._register_hooks()
175
-
176
- def _register_hooks(self):
177
- def forward_hook(module, input, output):
178
- if isinstance(output, tuple):
179
- self.activations = output[0]
180
- else:
181
- self.activations = output
182
-
183
- def backward_hook(module, grad_in, grad_out):
184
- if isinstance(grad_out, tuple):
185
- self.gradients = grad_out[0]
186
- else:
187
- self.gradients = grad_out
188
-
189
- layer = dict([*self.model.named_modules()])[self.target_layer]
190
- layer.register_forward_hook(forward_hook)
191
- layer.register_backward_hook(backward_hook)
192
-
193
- def generate(self, input_tensor, class_idx):
194
- self.model.zero_grad()
195
-
196
- try:
197
- # Use only the vision part of the model for gradient calculation
198
- vision_outputs = self.model.vision_model(pixel_values=input_tensor)
199
-
200
- # Get the pooler output
201
- features = vision_outputs.pooler_output
202
-
203
- # Create a dummy gradient for the feature based on the class idx
204
- one_hot = torch.zeros_like(features)
205
- one_hot[0, class_idx] = 1
206
-
207
- # Manually backpropagate
208
- features.backward(gradient=one_hot)
209
-
210
- # Check for None values
211
- if self.gradients is None or self.activations is None:
212
- st.warning("Warning: Gradients or activations are None. Using fallback CAM.")
213
- return np.ones((14, 14), dtype=np.float32) * 0.5
214
-
215
- # Process gradients and activations for transformer-based model
216
- gradients = self.gradients.cpu().detach().numpy()
217
- activations = self.activations.cpu().detach().numpy()
218
-
219
- if len(activations.shape) == 3: # [batch, sequence_length, hidden_dim]
220
- seq_len = activations.shape[1]
221
-
222
- # CLIP ViT typically has 196 patch tokens (14×14) + 1 class token = 197
223
- if seq_len >= 197:
224
- # Skip the class token (first token) and reshape the patch tokens into a square
225
- patch_tokens = activations[0, 1:197, :] # Remove the class token
226
- # Take the mean across the hidden dimension
227
- token_importance = np.mean(np.abs(patch_tokens), axis=1)
228
- # Reshape to the expected grid size (14×14 for CLIP ViT)
229
- cam = token_importance.reshape(14, 14)
230
- else:
231
- # Try to find factors close to a square
232
- side_len = int(np.sqrt(seq_len))
233
- # Use the mean across features as importance
234
- token_importance = np.mean(np.abs(activations[0]), axis=1)
235
- # Create as square-like shape as possible
236
- cam = np.zeros((side_len, side_len))
237
- # Fill the cam with available values
238
- flat_cam = cam.flatten()
239
- flat_cam[:min(len(token_importance), len(flat_cam))] = token_importance[:min(len(token_importance), len(flat_cam))]
240
- cam = flat_cam.reshape(side_len, side_len)
241
- else:
242
- # Fallback
243
- st.info("Using fallback CAM shape (14x14)")
244
- cam = np.ones((14, 14), dtype=np.float32) * 0.5 # Default fallback
245
-
246
- # Ensure we have valid values
247
- cam = np.maximum(cam, 0)
248
- if np.max(cam) > 0:
249
- cam = cam / np.max(cam)
250
-
251
- return cam
252
-
253
- except Exception as e:
254
- st.error(f"Error in GradCAM.generate: {str(e)}")
255
- return np.ones((14, 14), dtype=np.float32) * 0.5
256
-
257
- def overlay_cam_on_image(image, cam, face_box=None, alpha=0.5):
258
- """Overlay the CAM on the image"""
259
- if face_box is not None:
260
- x, y, w, h = face_box
261
- # Create a mask for the entire image (all zeros initially)
262
- img_np = np.array(image)
263
- full_h, full_w = img_np.shape[:2]
264
- full_cam = np.zeros((full_h, full_w), dtype=np.float32)
265
-
266
- # Resize CAM to match face region
267
- face_cam = cv2.resize(cam, (w, h))
268
-
269
- # Copy the face CAM into the full image CAM at the face position
270
- full_cam[y:y+h, x:x+w] = face_cam
271
-
272
- # Convert full CAM to image
273
- cam_resized = Image.fromarray((full_cam * 255).astype(np.uint8))
274
- cam_colormap = plt.cm.jet(np.array(cam_resized) / 255.0)[:, :, :3] # Apply colormap
275
- cam_colormap = (cam_colormap * 255).astype(np.uint8)
276
- else:
277
- # Resize CAM to match image dimensions
278
- img_np = np.array(image)
279
- h, w = img_np.shape[:2]
280
- cam_resized = cv2.resize(cam, (w, h))
281
-
282
- # Apply colormap
283
- cam_colormap = plt.cm.jet(cam_resized)[:, :, :3] # Apply colormap
284
- cam_colormap = (cam_colormap * 255).astype(np.uint8)
285
-
286
- # Blend the original image with the colormap
287
- img_np_float = img_np.astype(float) / 255.0
288
- cam_colormap_float = cam_colormap.astype(float) / 255.0
289
-
290
- blended = img_np_float * (1 - alpha) + cam_colormap_float * alpha
291
- blended = (blended * 255).astype(np.uint8)
292
-
293
- return Image.fromarray(blended)
294
-
295
- def save_comparison(image, cam, overlay, face_box=None):
296
- """Create a side-by-side comparison of the original, CAM, and overlay"""
297
- fig, axes = plt.subplots(1, 3, figsize=(15, 5))
298
-
299
- # Original Image
300
- axes[0].imshow(image)
301
- axes[0].set_title("Original")
302
- if face_box is not None:
303
- x, y, w, h = face_box
304
- rect = plt.Rectangle((x, y), w, h, edgecolor='lime', linewidth=2, fill=False)
305
- axes[0].add_patch(rect)
306
- axes[0].axis("off")
307
-
308
- # CAM
309
- if face_box is not None:
310
- # Create a full image CAM that highlights only the face
311
- img_np = np.array(image)
312
- h, w = img_np.shape[:2]
313
- full_cam = np.zeros((h, w))
314
-
315
- x, y, fw, fh = face_box
316
- # Resize CAM to face size
317
- face_cam = cv2.resize(cam, (fw, fh))
318
- # Place it in the right position
319
- full_cam[y:y+fh, x:x+fw] = face_cam
320
- axes[1].imshow(full_cam, cmap="jet")
321
- else:
322
- cam_resized = cv2.resize(cam, (image.width, image.height))
323
- axes[1].imshow(cam_resized, cmap="jet")
324
- axes[1].set_title("CAM")
325
- axes[1].axis("off")
326
-
327
- # Overlay
328
- axes[2].imshow(overlay)
329
- axes[2].set_title("Overlay")
330
- axes[2].axis("off")
331
-
332
- plt.tight_layout()
333
-
334
- # Convert plot to PIL Image for Streamlit display
335
- buf = io.BytesIO()
336
- plt.savefig(buf, format="png", bbox_inches="tight")
337
- plt.close()
338
- buf.seek(0)
339
- return Image.open(buf)
340
-
341
- # Function to load GradCAM CLIP model
342
- @st.cache_resource
343
- def load_clip_model():
344
- with st.spinner("Loading CLIP model for GradCAM..."):
345
- model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
346
-
347
- # Apply a simple classification head
348
- model.classification_head = nn.Linear(1024, 2)
349
- model.classification_head.weight.data.normal_(mean=0.0, std=0.02)
350
- model.classification_head.bias.data.zero_()
351
-
352
- model.eval()
353
- return model
354
-
355
- def get_target_layer_clip(model):
356
- """Get the target layer for GradCAM"""
357
- return "vision_model.encoder.layers.23"
358
 
359
- def process_image_with_gradcam(image, model, device, pred_class):
360
- """Process an image with GradCAM"""
361
- # Set up transformations
362
- transform = transforms.Compose([
363
- transforms.Resize((224, 224)),
364
- transforms.ToTensor(),
365
- transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
366
- ])
367
-
368
- # Create dataset for the single image
369
- dataset = ImageDataset(image, transform=transform, face_only=True)
370
-
371
- # Custom collate function
372
- def custom_collate(batch):
373
- tensors = [item[0] for item in batch]
374
- labels = [item[1] for item in batch]
375
- paths = [item[2] for item in batch]
376
- images = [item[3] for item in batch]
377
- face_boxes = [item[4] for item in batch]
378
- dataset_names = [item[5] for item in batch]
379
-
380
- tensors = torch.stack(tensors)
381
- labels = torch.tensor(labels)
382
-
383
- return tensors, labels, paths, images, face_boxes, dataset_names
384
-
385
- # Create dataloader
386
- dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=custom_collate)
387
-
388
- # Extract the batch
389
- for batch in dataloader:
390
- input_tensor, label, img_paths, original_images, face_boxes, dataset_names = batch
391
- original_image = original_images[0]
392
- face_box = face_boxes[0]
393
-
394
- # Move tensors and model to device
395
- input_tensor = input_tensor.to(device)
396
- model = model.to(device)
397
-
398
- try:
399
- # Create GradCAM extractor
400
- target_layer = get_target_layer_clip(model)
401
- cam_extractor = GradCAM(model, target_layer)
402
-
403
- # Generate CAM
404
- cam = cam_extractor.generate(input_tensor, pred_class)
405
-
406
- # Create visualizations
407
- overlay = overlay_cam_on_image(original_image, cam, face_box)
408
- comparison = save_comparison(original_image, cam, overlay, face_box)
409
-
410
- # Return results
411
- return cam, overlay, comparison, face_box
412
-
413
- except Exception as e:
414
- st.error(f"Error processing image with GradCAM: {str(e)}")
415
- # Return default values
416
- default_cam = np.ones((14, 14), dtype=np.float32) * 0.5
417
- overlay = overlay_cam_on_image(original_image, default_cam, face_box)
418
- comparison = save_comparison(original_image, default_cam, overlay, face_box)
419
- return default_cam, overlay, comparison, face_box
420
 
421
- # ----- BLIP Image Captioning -----
422
 
423
  # Function to load BLIP captioning model
424
  @st.cache_resource
@@ -432,39 +110,8 @@ def load_blip_model():
432
  st.error(f"Error loading BLIP model: {str(e)}")
433
  return None, None
434
 
435
- # Define custom prompts for original and GradCAM images
436
- ORIGINAL_IMAGE_PROMPT = """Generate a detailed description of this image with the following structure:
437
- Subject: [Describe the person/main subject]
438
- Appearance: [Describe clothing, hair, facial features]
439
- Pose: [Describe the person's pose and expression]
440
- Background: [Describe the environment and setting]
441
- Lighting: [Describe lighting conditions and shadows]
442
- Colors: [Note dominant colors and color palette]
443
- Notable Elements: [Any distinctive objects or visual elements]"""
444
-
445
- GRADCAM_IMAGE_PROMPT = """Describe the GradCAM visualization overlay with the following structure:
446
- Main Focus Area: [Identify the primary region highlighted]
447
- High Activation Regions: [Describe red/yellow areas and corresponding image features]
448
- Medium Activation Regions: [Describe green/cyan areas and corresponding image features]
449
- Low Activation Regions: [Describe blue/dark blue areas and corresponding image features]
450
- Activation Pattern: [Describe the overall pattern of the heatmap]"""
451
-
452
- # Function to generate image caption
453
  def generate_image_caption(image, processor, model, is_gradcam=False, max_length=75, num_beams=5):
454
- """
455
- Generate a caption for the input image using BLIP model
456
-
457
- Args:
458
- image (PIL.Image): Input image
459
- processor: BLIP processor
460
- model: BLIP model
461
- is_gradcam (bool): Whether the image is a GradCAM visualization
462
- max_length (int): Maximum length of the caption
463
- num_beams (int): Number of beams for beam search
464
-
465
- Returns:
466
- str: Generated caption
467
- """
468
  try:
469
  # Select the appropriate prompt based on image type
470
  prompt = GRADCAM_IMAGE_PROMPT if is_gradcam else ORIGINAL_IMAGE_PROMPT
@@ -493,101 +140,11 @@ def generate_image_caption(image, processor, model, is_gradcam=False, max_length
493
  st.error(f"Error generating caption: {str(e)}")
494
  return "Error generating caption"
495
 
496
- # ----- Fine-tuned Vision LLM -----
497
-
498
- # Function to fix cross-attention masks
499
- def fix_cross_attention_mask(inputs):
500
- if 'cross_attention_mask' in inputs and 0 in inputs['cross_attention_mask'].shape:
501
- batch_size, seq_len, _, num_tiles = inputs['cross_attention_mask'].shape
502
- visual_features = 6404 # Critical dimension
503
- new_mask = torch.ones((batch_size, seq_len, visual_features, num_tiles),
504
- device=inputs['cross_attention_mask'].device)
505
- inputs['cross_attention_mask'] = new_mask
506
- st.success("Fixed cross-attention mask dimensions")
507
- return inputs
508
-
509
- # Load model function
510
- @st.cache_resource
511
- def load_llm_model():
512
- with st.spinner("Loading LLM vision model... This may take a few minutes. Please be patient..."):
513
- try:
514
- # Check for GPU
515
- has_gpu = check_gpu()
516
-
517
- # Load base model and tokenizer using Unsloth
518
- base_model_id = "unsloth/llama-3.2-11b-vision-instruct"
519
- model, tokenizer = FastVisionModel.from_pretrained(
520
- base_model_id,
521
- load_in_4bit=True,
522
- )
523
-
524
- # Load the adapter
525
- adapter_id = "saakshigupta/deepfake-explainer-1"
526
- model = PeftModel.from_pretrained(model, adapter_id)
527
-
528
- # Set to inference mode
529
- FastVisionModel.for_inference(model)
530
-
531
- return model, tokenizer
532
- except Exception as e:
533
- st.error(f"Error loading model: {str(e)}")
534
- return None, None
535
-
536
- # Analyze image function
537
- def analyze_image_with_llm(image, gradcam_overlay, face_box, pred_label, confidence, question, model, tokenizer, temperature=0.7, max_tokens=500, custom_instruction=""):
538
- # Create a prompt that includes GradCAM information
539
- if custom_instruction.strip():
540
- full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious.\n\n{custom_instruction}"
541
- else:
542
- full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious."
543
-
544
- # Format the message to include both the original image and the GradCAM visualization
545
- messages = [
546
- {"role": "user", "content": [
547
- {"type": "image", "image": image}, # Original image
548
- {"type": "image", "image": gradcam_overlay}, # GradCAM overlay
549
- {"type": "text", "text": full_prompt}
550
- ]}
551
- ]
552
-
553
- # Apply chat template
554
- input_text = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
555
-
556
- # Process with image
557
- inputs = tokenizer(
558
- [image, gradcam_overlay], # Send both images
559
- input_text,
560
- add_special_tokens=False,
561
- return_tensors="pt",
562
- ).to(model.device)
563
-
564
- # Fix cross-attention mask if needed
565
- inputs = fix_cross_attention_mask(inputs)
566
-
567
- # Generate response
568
- with st.spinner("Generating detailed analysis... (this may take 15-30 seconds)"):
569
- with torch.no_grad():
570
- output_ids = model.generate(
571
- **inputs,
572
- max_new_tokens=max_tokens,
573
- use_cache=True,
574
- temperature=temperature,
575
- top_p=0.9
576
- )
577
-
578
- # Decode the output
579
- response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
580
-
581
- # Try to extract just the model's response (after the prompt)
582
- if full_prompt in response:
583
- result = response.split(full_prompt)[-1].strip()
584
- else:
585
- result = response
586
-
587
- return result
588
-
589
  # Main app
590
  def main():
591
  # Create placeholders for model state
592
  if 'clip_model_loaded' not in st.session_state:
593
  st.session_state.clip_model_loaded = False
@@ -605,148 +162,56 @@ def main():
605
 
606
  # Create expanders for each stage
607
  with st.expander("Stage 1: Model Loading", expanded=True):
608
  # Button for loading models
609
  clip_col, llm_col, blip_col = st.columns(3)
610
 
611
  with clip_col:
612
  if not st.session_state.clip_model_loaded:
613
  if st.button("📥 Load CLIP Model for Detection", type="primary"):
614
- # Load CLIP model
615
- model = load_clip_model()
616
- if model is not None:
617
- st.session_state.clip_model = model
618
- st.session_state.clip_model_loaded = True
619
- st.success("✅ CLIP model loaded successfully!")
620
- else:
621
- st.error("❌ Failed to load CLIP model.")
622
  else:
623
  st.success("✅ CLIP model loaded and ready!")
624
 
625
  with llm_col:
626
  if not st.session_state.llm_model_loaded:
627
  if st.button("📥 Load Vision LLM for Analysis", type="primary"):
628
- # Load LLM model
629
- model, tokenizer = load_llm_model()
630
- if model is not None and tokenizer is not None:
631
- st.session_state.llm_model = model
632
- st.session_state.tokenizer = tokenizer
633
- st.session_state.llm_model_loaded = True
634
- st.success("✅ Vision LLM loaded successfully!")
635
- else:
636
- st.error("❌ Failed to load Vision LLM.")
637
  else:
638
  st.success("✅ Vision LLM loaded and ready!")
639
 
640
  with blip_col:
641
  if not st.session_state.blip_model_loaded:
642
  if st.button("📥 Load BLIP for Captioning", type="primary"):
643
- # Load BLIP model
644
- processor, model = load_blip_model()
645
- if model is not None and processor is not None:
646
- st.session_state.blip_processor = processor
647
- st.session_state.blip_model = model
648
- st.session_state.blip_model_loaded = True
649
- st.success("✅ BLIP captioning model loaded successfully!")
650
- else:
651
- st.error("❌ Failed to load BLIP model.")
652
  else:
653
  st.success("✅ BLIP captioning model loaded and ready!")
654
 
655
  # Image upload section
656
- with st.expander("Stage 2: Image Upload & Initial Detection", expanded=True):
657
  st.subheader("Upload an Image")
658
  uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
659
 
660
  if uploaded_file is not None:
 
661
  # Display the uploaded image
662
- image = Image.open(uploaded_file).convert("RGB")
663
- st.image(image, caption="Uploaded Image", use_column_width=True)
664
-
665
- # Generate detailed caption for original image if BLIP model is loaded
666
- if st.session_state.blip_model_loaded:
667
- with st.spinner("Generating detailed image description..."):
668
- caption = generate_image_caption(
669
- image,
670
- st.session_state.blip_processor,
671
- st.session_state.blip_model,
672
- is_gradcam=False
673
- )
674
- st.session_state.image_caption = caption
675
- st.success(f"📝 Image Description Generated")
676
-
677
- # Format the caption nicely
678
- st.markdown("### Image Description:")
679
- st.markdown(caption)
680
-
681
- # Detect with CLIP model if loaded
682
- if st.session_state.clip_model_loaded:
683
- with st.spinner("Analyzing image with CLIP model..."):
684
- # Preprocess image for CLIP
685
- transform = transforms.Compose([
686
- transforms.Resize((224, 224)),
687
- transforms.ToTensor(),
688
- transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
689
- ])
690
-
691
- # Create a simple dataset for the image
692
- dataset = ImageDataset(image, transform=transform, face_only=True)
693
- tensor, _, _, _, face_box, _ = dataset[0]
694
- tensor = tensor.unsqueeze(0)
695
-
696
- # Get device
697
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
698
-
699
- # Move model and tensor to device
700
- model = st.session_state.clip_model.to(device)
701
- tensor = tensor.to(device)
702
-
703
- # Forward pass
704
- with torch.no_grad():
705
- outputs = model.vision_model(pixel_values=tensor).pooler_output
706
- logits = model.classification_head(outputs)
707
- probs = torch.softmax(logits, dim=1)[0]
708
- pred_class = torch.argmax(probs).item()
709
- confidence = probs[pred_class].item()
710
- pred_label = "Fake" if pred_class == 1 else "Real"
711
-
712
- # Display results
713
- result_col1, result_col2 = st.columns(2)
714
- with result_col1:
715
- st.metric("Prediction", pred_label)
716
- with result_col2:
717
- st.metric("Confidence", f"{confidence:.2%}")
718
-
719
- # GradCAM visualization
720
- st.subheader("GradCAM Visualization")
721
- cam, overlay, comparison, detected_face_box = process_image_with_gradcam(
722
- image, model, device, pred_class
723
- )
724
-
725
- # Display GradCAM results
726
- st.image(comparison, caption="Original | CAM | Overlay", use_column_width=True)
727
-
728
- # Generate caption for GradCAM overlay image if BLIP model is loaded
729
- if st.session_state.blip_model_loaded:
730
- with st.spinner("Analyzing GradCAM visualization..."):
731
- gradcam_caption = generate_image_caption(
732
- overlay,
733
- st.session_state.blip_processor,
734
- st.session_state.blip_model,
735
- is_gradcam=True,
736
- max_length=100 # Longer for detailed analysis
737
- )
738
- st.session_state.gradcam_caption = gradcam_caption
739
- st.success("✅ GradCAM analysis complete")
740
-
741
- # Format the GradCAM caption nicely
742
- st.markdown("### GradCAM Analysis:")
743
- st.markdown(gradcam_caption)
744
-
745
- # Save results in session state for LLM analysis
746
- st.session_state.current_image = image
747
- st.session_state.current_overlay = overlay
748
- st.session_state.current_face_box = detected_face_box
749
- st.session_state.current_pred_label = pred_label
750
- st.session_state.current_confidence = confidence
751
-
752
- st.success("✅ Initial detection and GradCAM visualization complete!")
 
69
  help="Add specific instructions for the LLM analysis"
70
  )
71
 
72
+ # Debug section - try adding this to see if it appears
73
+ st.write("Debug: Initial app setup complete")
74
+
75
  # About section in sidebar
76
  st.sidebar.markdown("---")
77
  st.sidebar.subheader("About")
 
91
  - Blending problems
92
  """)
93
 
94
+ # ----- BLIP Image Captioning -----
95
 
96
+ # Define custom prompts for original and GradCAM images - simplified to avoid errors
97
+ ORIGINAL_IMAGE_PROMPT = "Generate a detailed description of this image with the following structure: Subject, Appearance, Pose, Background, Lighting, Colors, Notable Elements"
98
 
99
+ GRADCAM_IMAGE_PROMPT = "Describe the GradCAM visualization overlay with the following structure: Main Focus Area, High Activation Regions, Medium Activation Regions, Low Activation Regions, Activation Pattern"
100
 
101
  # Function to load BLIP captioning model
102
  @st.cache_resource
 
110
  st.error(f"Error loading BLIP model: {str(e)}")
111
  return None, None
112
 
113
+ # Simplified function to generate image caption
114
  def generate_image_caption(image, processor, model, is_gradcam=False, max_length=75, num_beams=5):
115
  try:
116
  # Select the appropriate prompt based on image type
117
  prompt = GRADCAM_IMAGE_PROMPT if is_gradcam else ORIGINAL_IMAGE_PROMPT
 
140
  st.error(f"Error generating caption: {str(e)}")
141
  return "Error generating caption"
142
 
143
  # Main app
144
  def main():
145
+ # Debug - add this to see if main function is being called
146
+ st.write("Debug: Main function started")
147
+
148
  # Create placeholders for model state
149
  if 'clip_model_loaded' not in st.session_state:
150
  st.session_state.clip_model_loaded = False
 
162
 
163
  # Create expanders for each stage
164
  with st.expander("Stage 1: Model Loading", expanded=True):
165
+ st.write("Please load the models using the buttons below:")
166
+
167
  # Button for loading models
168
  clip_col, llm_col, blip_col = st.columns(3)
169
 
170
  with clip_col:
171
  if not st.session_state.clip_model_loaded:
172
  if st.button("📥 Load CLIP Model for Detection", type="primary"):
173
+ st.success("CLIP model button clicked")
174
+ # For now, just set as loaded for testing UI
175
+ st.session_state.clip_model_loaded = True
176
  else:
177
  st.success("✅ CLIP model loaded and ready!")
178
 
179
  with llm_col:
180
  if not st.session_state.llm_model_loaded:
181
  if st.button("📥 Load Vision LLM for Analysis", type="primary"):
182
+ st.success("LLM model button clicked")
183
+ # For now, just set as loaded for testing UI
184
+ st.session_state.llm_model_loaded = True
185
  else:
186
  st.success("✅ Vision LLM loaded and ready!")
187
 
188
  with blip_col:
189
  if not st.session_state.blip_model_loaded:
190
  if st.button("📥 Load BLIP for Captioning", type="primary"):
191
+ st.success("BLIP model button clicked")
192
+ # For now, just set as loaded for testing UI
193
+ st.session_state.blip_model_loaded = True
194
  else:
195
  st.success("✅ BLIP captioning model loaded and ready!")
196
 
197
  # Image upload section
198
+ with st.expander("Stage 2: Image Upload", expanded=True):
199
  st.subheader("Upload an Image")
200
  uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
201
 
202
  if uploaded_file is not None:
203
+ st.success("Image uploaded successfully!")
204
  # Display the uploaded image
205
+ try:
206
+ image = Image.open(uploaded_file).convert("RGB")
207
+ st.image(image, caption="Uploaded Image", use_column_width=True)
208
+ st.session_state.current_image = image
209
+ except Exception as e:
210
+ st.error(f"Error loading image: {str(e)}")
211
+
212
+ # Footer
213
+ st.markdown("---")
214
+ st.caption("Advanced Deepfake Image Analyzer with Structured BLIP Captioning")
215
+
216
+ if __name__ == "__main__":
217
+ main()
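
In this revision the three "Load ..." buttons only flip the *_model_loaded flags ("For now, just set as loaded for testing UI"). A minimal sketch of how the CLIP button could later be wired back to the cached loader removed higher up in this diff, assuming app.py still imports streamlit as st, torch.nn as nn, and transformers.CLIPModel as in the previous revision:

import streamlit as st
import torch.nn as nn
from transformers import CLIPModel

@st.cache_resource
def load_clip_model():
    # Same checkpoint and two-class head as the loader removed in this commit
    with st.spinner("Loading CLIP model for GradCAM..."):
        model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
        model.classification_head = nn.Linear(1024, 2)  # 1024 = ViT-L/14 vision pooler width
        model.classification_head.weight.data.normal_(mean=0.0, std=0.02)
        model.classification_head.bias.data.zero_()
        model.eval()
        return model

# Inside the clip_col block of main(), replacing the "set as loaded" stub:
if st.button("📥 Load CLIP Model for Detection", type="primary"):
    model = load_clip_model()
    if model is not None:
        st.session_state.clip_model = model
        st.session_state.clip_model_loaded = True
        st.success("✅ CLIP model loaded successfully!")
    else:
        st.error("❌ Failed to load CLIP model.")

Because of @st.cache_resource, the checkpoint is downloaded and initialized only on the first click and then reused across Streamlit reruns.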