saakshigupta committed
Commit 7f188a6 · verified · 1 Parent(s): 7dfa8f2

Update app.py

Files changed (1)
  1. app.py +494 -55
app.py CHANGED
@@ -1,11 +1,22 @@
  import streamlit as st
  import torch
  from PIL import Image
  import io
  from peft import PeftModel
  from unsloth import FastVisionModel
- import tempfile
  import os

  # App title and description
  st.set_page_config(
@@ -15,8 +26,8 @@ st.set_page_config(
  )

  # Main title and description
- st.title("Deepfake Image Analyzer")
- st.markdown("Upload an image to analyze it for possible deepfake manipulation")

  # Check for GPU availability
  def check_gpu():
@@ -54,26 +65,360 @@ max_tokens = st.sidebar.slider(
  # Custom instruction text area in sidebar
  custom_instruction = st.sidebar.text_area(
      "Custom Instructions (Advanced)",
-     value="Analyze for facial inconsistencies, lighting irregularities, mismatched shadows, and other signs of manipulation.",
-     help="Add specific instructions for the model"
  )

  # About section in sidebar
  st.sidebar.markdown("---")
  st.sidebar.subheader("About")
  st.sidebar.markdown("""
- This analyzer looks for:
  - Facial inconsistencies
  - Unnatural movements
  - Lighting issues
  - Texture anomalies
  - Edge artifacts
  - Blending problems
-
- **Model**: Fine-tuned Llama 3.2 Vision
- **Creator**: [Saakshi Gupta](https://huggingface.co/saakshigupta)
  """)

  # Function to fix cross-attention masks
  def fix_cross_attention_mask(inputs):
      if 'cross_attention_mask' in inputs and 0 in inputs['cross_attention_mask'].shape:
@@ -87,8 +432,8 @@ def fix_cross_attention_mask(inputs):

  # Load model function
  @st.cache_resource
- def load_model():
-     with st.spinner("Loading model... This may take a few minutes. Please be patient..."):
          try:
              # Check for GPU
              has_gpu = check_gpu()
@@ -113,17 +458,18 @@ def load_model():
              return None, None

  # Analyze image function
- def analyze_image(image, question, model, tokenizer, temperature=0.7, max_tokens=500, custom_instruction=""):
-     # Combine question with custom instruction if provided
      if custom_instruction.strip():
-         full_prompt = f"{question}\n\nAdditional instructions: {custom_instruction}"
      else:
-         full_prompt = question

-     # Format the message
      messages = [
          {"role": "user", "content": [
-             {"type": "image"},
              {"type": "text", "text": full_prompt}
          ]}
      ]
@@ -133,7 +479,7 @@ def analyze_image(image, question, model, tokenizer, temperature=0.7, max_tokens

      # Process with image
      inputs = tokenizer(
-         image,
          input_text,
          add_special_tokens=False,
          return_tensors="pt",
@@ -143,7 +489,7 @@ def analyze_image(image, question, model, tokenizer, temperature=0.7, max_tokens
      inputs = fix_cross_attention_mask(inputs)

      # Generate response
-     with st.spinner("Analyzing image... (this may take 15-30 seconds)"):
          with torch.no_grad():
              output_ids = model.generate(
                  **inputs,
@@ -166,46 +512,137 @@ def analyze_image(image, question, model, tokenizer, temperature=0.7, max_tokens

  # Main app
  def main():
-     # Create a button to load the model
-     if 'model_loaded' not in st.session_state:
-         st.session_state.model_loaded = False
-         st.session_state.model = None
          st.session_state.tokenizer = None

-     # Load model button
-     if not st.session_state.model_loaded:
-         if st.button("📥 Load Deepfake Analysis Model", type="primary"):
-             model, tokenizer = load_model()
-             if model is not None and tokenizer is not None:
-                 st.session_state.model = model
-                 st.session_state.tokenizer = tokenizer
-                 st.session_state.model_loaded = True
-                 st.success("✅ Model loaded successfully! You can now analyze images.")
              else:
-                 st.error("❌ Failed to load model. Please check the logs for errors.")
-     else:
-         st.success("✅ Model loaded successfully! You can now analyze images.")

      # Image upload section
-     st.subheader("Upload an Image")
-     uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
-
-     # Default question with option to customize
-     default_question = "Analyze this image and tell me if it's a deepfake. Provide both technical and non-technical explanations."
-     question = st.text_area("Question/Prompt:", value=default_question, height=100)

-     if uploaded_file is not None:
-         # Display the uploaded image
-         image = Image.open(uploaded_file).convert("RGB")
-         st.image(image, caption="Uploaded Image", use_column_width=True)
-
-         # Analyze button - only enabled if model is loaded
-         if st.session_state.model_loaded:
-             if st.button("🔍 Analyze Image", type="primary"):
-                 result = analyze_image(
-                     image,
-                     question,
-                     st.session_state.model,
                      st.session_state.tokenizer,
                      temperature=temperature,
                      max_tokens=max_tokens,
@@ -235,12 +672,14 @@ def main():
                      # Just display the whole result
                      st.subheader("Analysis Result")
                      st.markdown(result)
          else:
-             st.warning("⚠️ Please load the model first before analyzing images.")

      # Footer
      st.markdown("---")
-     st.caption("Deepfake Image Analyzer")

  if __name__ == "__main__":
      main()
 
  import streamlit as st
  import torch
+ import torch.nn as nn
+ from torch.utils.data import DataLoader
+ from torchvision import transforms
+ from transformers import CLIPModel
+ from transformers.models.clip import CLIPModel
  from PIL import Image
+ import numpy as np
  import io
+ import base64
+ import cv2
+ import matplotlib.pyplot as plt
  from peft import PeftModel
  from unsloth import FastVisionModel
  import os
+ import tempfile
+ import warnings
+ warnings.filterwarnings("ignore", category=UserWarning)

  # App title and description
  st.set_page_config(
 
  )

  # Main title and description
+ st.title("Advanced Deepfake Image Analyzer")
+ st.markdown("Analyze images for deepfake manipulation with multi-stage analysis")

  # Check for GPU availability
  def check_gpu():
 
  # Custom instruction text area in sidebar
  custom_instruction = st.sidebar.text_area(
      "Custom Instructions (Advanced)",
+     value="Focus on analyzing the highlighted regions from the GradCAM visualization. Examine facial inconsistencies, lighting irregularities, and other artifacts visible in the heat map.",
+     help="Add specific instructions for the LLM analysis"
  )

  # About section in sidebar
  st.sidebar.markdown("---")
  st.sidebar.subheader("About")
  st.sidebar.markdown("""
+ This analyzer performs multi-stage detection:
+ 1. **Initial Detection**: CLIP-based classifier
+ 2. **GradCAM Visualization**: Highlights suspicious regions
+ 3. **LLM Analysis**: Fine-tuned Llama 3.2 Vision provides detailed explanations
+
+ The system looks for:
  - Facial inconsistencies
  - Unnatural movements
  - Lighting issues
  - Texture anomalies
  - Edge artifacts
  - Blending problems
  """)

+ # ----- GradCAM Implementation -----
+
+ class ImageDataset(torch.utils.data.Dataset):
+     def __init__(self, image, transform=None, face_only=True, dataset_name=None):
+         self.image = image
+         self.transform = transform
+         self.face_only = face_only
+         self.dataset_name = dataset_name
+         # Load face detector
+         self.face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+
+     def __len__(self):
+         return 1  # Only one image
+
+     def detect_face(self, image_np):
+         """Detect face in image and return the face region"""
+         gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
+         faces = self.face_detector.detectMultiScale(gray, 1.1, 5)
+
+         # If no face is detected, use the whole image
+         if len(faces) == 0:
+             st.info("No face detected, using whole image for analysis")
+             h, w = image_np.shape[:2]
+             return (0, 0, w, h), image_np
+
+         # Get the largest face
+         if len(faces) > 1:
+             # Choose the largest face by area
+             areas = [w*h for (x, y, w, h) in faces]
+             largest_idx = np.argmax(areas)
+             x, y, w, h = faces[largest_idx]
+         else:
+             x, y, w, h = faces[0]
+
+         # Add padding around the face (5% on each side)
+         padding_x = int(w * 0.05)
+         padding_y = int(h * 0.05)
+
+         # Ensure padding doesn't go outside image bounds
+         x1 = max(0, x - padding_x)
+         y1 = max(0, y - padding_y)
+         x2 = min(image_np.shape[1], x + w + padding_x)
+         y2 = min(image_np.shape[0], y + h + padding_y)
+
+         # Extract the face region
+         face_img = image_np[y1:y2, x1:x2]
+
+         return (x1, y1, x2-x1, y2-y1), face_img
+
+     def __getitem__(self, idx):
+         image_np = np.array(self.image)
+         label = 0  # Default label; will be overridden by prediction
+
+         # Store original image for visualization
+         original_image = self.image.copy()
+
+         # Detect face if required
+         if self.face_only:
+             face_box, face_img_np = self.detect_face(image_np)
+             face_img = Image.fromarray(face_img_np)
+
+             # Apply transform to face image
+             if self.transform:
+                 face_tensor = self.transform(face_img)
+             else:
+                 face_tensor = transforms.ToTensor()(face_img)
+
+             return face_tensor, label, "uploaded_image", original_image, face_box, self.dataset_name
+         else:
+             # Process the whole image
+             if self.transform:
+                 image_tensor = self.transform(self.image)
+             else:
+                 image_tensor = transforms.ToTensor()(self.image)
+
+             return image_tensor, label, "uploaded_image", original_image, None, self.dataset_name
+
+ class GradCAM:
+     def __init__(self, model, target_layer):
+         self.model = model
+         self.target_layer = target_layer
+         self.gradients = None
+         self.activations = None
+         self._register_hooks()
+
+     def _register_hooks(self):
+         def forward_hook(module, input, output):
+             if isinstance(output, tuple):
+                 self.activations = output[0]
+             else:
+                 self.activations = output
+
+         def backward_hook(module, grad_in, grad_out):
+             if isinstance(grad_out, tuple):
+                 self.gradients = grad_out[0]
+             else:
+                 self.gradients = grad_out
+
+         layer = dict([*self.model.named_modules()])[self.target_layer]
+         layer.register_forward_hook(forward_hook)
+         layer.register_backward_hook(backward_hook)
+
+     def generate(self, input_tensor, class_idx):
+         self.model.zero_grad()
+
+         try:
+             # Use only the vision part of the model for gradient calculation
+             vision_outputs = self.model.vision_model(pixel_values=input_tensor)
+
+             # Get the pooler output
+             features = vision_outputs.pooler_output
+
+             # Create a dummy gradient for the feature based on the class idx
+             one_hot = torch.zeros_like(features)
+             one_hot[0, class_idx] = 1
+
+             # Manually backpropagate
+             features.backward(gradient=one_hot)
+
+             # Check for None values
+             if self.gradients is None or self.activations is None:
+                 st.warning("Warning: Gradients or activations are None. Using fallback CAM.")
+                 return np.ones((14, 14), dtype=np.float32) * 0.5
+
+             # Process gradients and activations for transformer-based model
+             gradients = self.gradients.cpu().detach().numpy()
+             activations = self.activations.cpu().detach().numpy()
+
+             if len(activations.shape) == 3:  # [batch, sequence_length, hidden_dim]
+                 seq_len = activations.shape[1]
+
+                 # CLIP ViT typically has 196 patch tokens (14×14) + 1 class token = 197
+                 if seq_len >= 197:
+                     # Skip the class token (first token) and reshape the patch tokens into a square
+                     patch_tokens = activations[0, 1:197, :]  # Remove the class token
+                     # Take the mean across the hidden dimension
+                     token_importance = np.mean(np.abs(patch_tokens), axis=1)
+                     # Reshape to the expected grid size (14×14 for CLIP ViT)
+                     cam = token_importance.reshape(14, 14)
+                 else:
+                     # Try to find factors close to a square
+                     side_len = int(np.sqrt(seq_len))
+                     # Use the mean across features as importance
+                     token_importance = np.mean(np.abs(activations[0]), axis=1)
+                     # Create as square-like shape as possible
+                     cam = np.zeros((side_len, side_len))
+                     # Fill the cam with available values
+                     flat_cam = cam.flatten()
+                     flat_cam[:min(len(token_importance), len(flat_cam))] = token_importance[:min(len(token_importance), len(flat_cam))]
+                     cam = flat_cam.reshape(side_len, side_len)
+             else:
+                 # Fallback
+                 st.info("Using fallback CAM shape (14x14)")
+                 cam = np.ones((14, 14), dtype=np.float32) * 0.5  # Default fallback
+
+             # Ensure we have valid values
+             cam = np.maximum(cam, 0)
+             if np.max(cam) > 0:
+                 cam = cam / np.max(cam)
+
+             return cam
+
+         except Exception as e:
+             st.error(f"Error in GradCAM.generate: {str(e)}")
+             return np.ones((14, 14), dtype=np.float32) * 0.5
+
+ def overlay_cam_on_image(image, cam, face_box=None, alpha=0.5):
+     """Overlay the CAM on the image"""
+     if face_box is not None:
+         x, y, w, h = face_box
+         # Create a mask for the entire image (all zeros initially)
+         img_np = np.array(image)
+         full_h, full_w = img_np.shape[:2]
+         full_cam = np.zeros((full_h, full_w), dtype=np.float32)
+
+         # Resize CAM to match face region
+         face_cam = cv2.resize(cam, (w, h))
+
+         # Copy the face CAM into the full image CAM at the face position
+         full_cam[y:y+h, x:x+w] = face_cam
+
+         # Convert full CAM to image
+         cam_resized = Image.fromarray((full_cam * 255).astype(np.uint8))
+         cam_colormap = plt.cm.jet(np.array(cam_resized) / 255.0)[:, :, :3]  # Apply colormap
+         cam_colormap = (cam_colormap * 255).astype(np.uint8)
+     else:
+         # Resize CAM to match image dimensions
+         img_np = np.array(image)
+         h, w = img_np.shape[:2]
+         cam_resized = cv2.resize(cam, (w, h))
+
+         # Apply colormap
+         cam_colormap = plt.cm.jet(cam_resized)[:, :, :3]  # Apply colormap
+         cam_colormap = (cam_colormap * 255).astype(np.uint8)
+
+     # Blend the original image with the colormap
+     img_np_float = img_np.astype(float) / 255.0
+     cam_colormap_float = cam_colormap.astype(float) / 255.0
+
+     blended = img_np_float * (1 - alpha) + cam_colormap_float * alpha
+     blended = (blended * 255).astype(np.uint8)
+
+     return Image.fromarray(blended)
+
+ def save_comparison(image, cam, overlay, face_box=None):
+     """Create a side-by-side comparison of the original, CAM, and overlay"""
+     fig, axes = plt.subplots(1, 3, figsize=(15, 5))
+
+     # Original Image
+     axes[0].imshow(image)
+     axes[0].set_title("Original")
+     if face_box is not None:
+         x, y, w, h = face_box
+         rect = plt.Rectangle((x, y), w, h, edgecolor='lime', linewidth=2, fill=False)
+         axes[0].add_patch(rect)
+     axes[0].axis("off")
+
+     # CAM
+     if face_box is not None:
+         # Create a full image CAM that highlights only the face
+         img_np = np.array(image)
+         h, w = img_np.shape[:2]
+         full_cam = np.zeros((h, w))
+
+         x, y, fw, fh = face_box
+         # Resize CAM to face size
+         face_cam = cv2.resize(cam, (fw, fh))
+         # Place it in the right position
+         full_cam[y:y+fh, x:x+fw] = face_cam
+         axes[1].imshow(full_cam, cmap="jet")
+     else:
+         cam_resized = cv2.resize(cam, (image.width, image.height))
+         axes[1].imshow(cam_resized, cmap="jet")
+     axes[1].set_title("CAM")
+     axes[1].axis("off")
+
+     # Overlay
+     axes[2].imshow(overlay)
+     axes[2].set_title("Overlay")
+     axes[2].axis("off")
+
+     plt.tight_layout()
+
+     # Convert plot to PIL Image for Streamlit display
+     buf = io.BytesIO()
+     plt.savefig(buf, format="png", bbox_inches="tight")
+     plt.close()
+     buf.seek(0)
+     return Image.open(buf)
+
+ # Function to load GradCAM CLIP model
+ @st.cache_resource
+ def load_clip_model():
+     with st.spinner("Loading CLIP model for GradCAM..."):
+         model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
+
+         # Apply a simple classification head
+         model.classification_head = nn.Linear(1024, 2)
+         model.classification_head.weight.data.normal_(mean=0.0, std=0.02)
+         model.classification_head.bias.data.zero_()
+
+         model.eval()
+         return model
+
+ def get_target_layer_clip(model):
+     """Get the target layer for GradCAM"""
+     return "vision_model.encoder.layers.23"
+
+ def process_image_with_gradcam(image, model, device, pred_class):
+     """Process an image with GradCAM"""
+     # Set up transformations
+     transform = transforms.Compose([
+         transforms.Resize((224, 224)),
+         transforms.ToTensor(),
+         transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
+     ])
+
+     # Create dataset for the single image
+     dataset = ImageDataset(image, transform=transform, face_only=True)
+
+     # Custom collate function
+     def custom_collate(batch):
+         tensors = [item[0] for item in batch]
+         labels = [item[1] for item in batch]
+         paths = [item[2] for item in batch]
+         images = [item[3] for item in batch]
+         face_boxes = [item[4] for item in batch]
+         dataset_names = [item[5] for item in batch]
+
+         tensors = torch.stack(tensors)
+         labels = torch.tensor(labels)
+
+         return tensors, labels, paths, images, face_boxes, dataset_names
+
+     # Create dataloader
+     dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=custom_collate)
+
+     # Extract the batch
+     for batch in dataloader:
+         input_tensor, label, img_paths, original_images, face_boxes, dataset_names = batch
+         original_image = original_images[0]
+         face_box = face_boxes[0]
+
+         # Move tensors and model to device
+         input_tensor = input_tensor.to(device)
+         model = model.to(device)
+
+         try:
+             # Create GradCAM extractor
+             target_layer = get_target_layer_clip(model)
+             cam_extractor = GradCAM(model, target_layer)
+
+             # Generate CAM
+             cam = cam_extractor.generate(input_tensor, pred_class)
+
+             # Create visualizations
+             overlay = overlay_cam_on_image(original_image, cam, face_box)
+             comparison = save_comparison(original_image, cam, overlay, face_box)
+
+             # Return results
+             return cam, overlay, comparison, face_box
+
+         except Exception as e:
+             st.error(f"Error processing image with GradCAM: {str(e)}")
+             # Return default values
+             default_cam = np.ones((14, 14), dtype=np.float32) * 0.5
+             overlay = overlay_cam_on_image(original_image, default_cam, face_box)
+             comparison = save_comparison(original_image, default_cam, overlay, face_box)
+             return default_cam, overlay, comparison, face_box
+
+ # ----- Fine-tuned Vision LLM -----
+
  # Function to fix cross-attention masks
  def fix_cross_attention_mask(inputs):
      if 'cross_attention_mask' in inputs and 0 in inputs['cross_attention_mask'].shape:
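The GradCAM class added above hinges on PyTorch module hooks: a forward hook stores the target layer's activations, and a backward hook stores that layer's gradients once backward() runs. A minimal, self-contained sketch of the pattern follows; the toy linear layer is a stand-in (not the CLIP encoder layer targeted in the commit), and register_full_backward_hook is the non-deprecated counterpart of the register_backward_hook call used above.

# Hook-pattern sketch only; not part of app.py. The layer below is a stand-in.
import torch
import torch.nn as nn

layer = nn.Linear(8, 4)
captured = {}

def forward_hook(module, inputs, output):
    captured["activations"] = output          # stored during the forward pass

def backward_hook(module, grad_input, grad_output):
    captured["gradients"] = grad_output[0]    # stored when backward() reaches this layer

layer.register_forward_hook(forward_hook)
layer.register_full_backward_hook(backward_hook)

x = torch.randn(1, 8, requires_grad=True)
out = layer(x)
out.sum().backward()                          # triggers the backward hook
print(captured["activations"].shape, captured["gradients"].shape)  # both torch.Size([1, 4])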
 

  # Load model function
  @st.cache_resource
+ def load_llm_model():
+     with st.spinner("Loading LLM vision model... This may take a few minutes. Please be patient..."):
          try:
              # Check for GPU
              has_gpu = check_gpu()
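Both loaders (load_clip_model and load_llm_model) are wrapped in st.cache_resource, so the heavyweight objects are built once per server process and reused across Streamlit reruns and sessions. A minimal sketch of that behavior, with a toy loader that is not part of the commit:

# st.cache_resource sketch; the loader body runs once, later calls return the cached object.
import streamlit as st

@st.cache_resource
def load_big_object():
    st.write("loading...")        # shown only the first time the loader actually runs
    return {"weights": [0.1, 0.2, 0.3]}

obj_a = load_big_object()
obj_b = load_big_object()         # cache hit: the same object instance is returned
assert obj_a is obj_b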
 
              return None, None

  # Analyze image function
+ def analyze_image_with_llm(image, gradcam_overlay, face_box, pred_label, confidence, question, model, tokenizer, temperature=0.7, max_tokens=500, custom_instruction=""):
+     # Create a prompt that includes GradCAM information
      if custom_instruction.strip():
+         full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious.\n\n{custom_instruction}"
      else:
+         full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious."

+     # Format the message to include both the original image and the GradCAM visualization
      messages = [
          {"role": "user", "content": [
+             {"type": "image", "image": image},  # Original image
+             {"type": "image", "image": gradcam_overlay},  # GradCAM overlay
              {"type": "text", "text": full_prompt}
          ]}
      ]
 

      # Process with image
      inputs = tokenizer(
+         [image, gradcam_overlay],  # Send both images
          input_text,
          add_special_tokens=False,
          return_tensors="pt",
 
      inputs = fix_cross_attention_mask(inputs)

      # Generate response
+     with st.spinner("Generating detailed analysis... (this may take 15-30 seconds)"):
          with torch.no_grad():
              output_ids = model.generate(
                  **inputs,
 

  # Main app
  def main():
+     # Create placeholders for model state
+     if 'clip_model_loaded' not in st.session_state:
+         st.session_state.clip_model_loaded = False
+         st.session_state.clip_model = None
+
+     if 'llm_model_loaded' not in st.session_state:
+         st.session_state.llm_model_loaded = False
+         st.session_state.llm_model = None
          st.session_state.tokenizer = None

+     # Create expanders for each stage
+     with st.expander("Stage 1: Model Loading", expanded=True):
+         # Button for loading CLIP model
+         clip_col, llm_col = st.columns(2)
+
+         with clip_col:
+             if not st.session_state.clip_model_loaded:
+                 if st.button("📥 Load CLIP Model for Detection", type="primary"):
+                     # Load CLIP model
+                     model = load_clip_model()
+                     if model is not None:
+                         st.session_state.clip_model = model
+                         st.session_state.clip_model_loaded = True
+                         st.success("✅ CLIP model loaded successfully!")
+                     else:
+                         st.error("❌ Failed to load CLIP model.")
              else:
+                 st.success("✅ CLIP model loaded and ready!")
+
+         with llm_col:
+             if not st.session_state.llm_model_loaded:
+                 if st.button("📥 Load Vision LLM for Analysis", type="primary"):
+                     # Load LLM model
+                     model, tokenizer = load_llm_model()
+                     if model is not None and tokenizer is not None:
+                         st.session_state.llm_model = model
+                         st.session_state.tokenizer = tokenizer
+                         st.session_state.llm_model_loaded = True
+                         st.success("✅ Vision LLM loaded successfully!")
+                     else:
+                         st.error("❌ Failed to load Vision LLM.")
+             else:
+                 st.success("✅ Vision LLM loaded and ready!")

      # Image upload section
+     with st.expander("Stage 2: Image Upload & Initial Detection", expanded=True):
+         st.subheader("Upload an Image")
+         uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+
+         if uploaded_file is not None:
+             # Display the uploaded image
+             image = Image.open(uploaded_file).convert("RGB")
+             st.image(image, caption="Uploaded Image", use_column_width=True)
+
+             # Detect with CLIP model if loaded
+             if st.session_state.clip_model_loaded:
+                 with st.spinner("Analyzing image with CLIP model..."):
+                     # Preprocess image for CLIP
+                     transform = transforms.Compose([
+                         transforms.Resize((224, 224)),
+                         transforms.ToTensor(),
+                         transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
+                     ])
+
+                     # Create a simple dataset for the image
+                     dataset = ImageDataset(image, transform=transform, face_only=True)
+                     tensor, _, _, _, face_box, _ = dataset[0]
+                     tensor = tensor.unsqueeze(0)
+
+                     # Get device
+                     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+                     # Move model and tensor to device
+                     model = st.session_state.clip_model.to(device)
+                     tensor = tensor.to(device)
+
+                     # Forward pass
+                     with torch.no_grad():
+                         outputs = model.vision_model(pixel_values=tensor).pooler_output
+                         logits = model.classification_head(outputs)
+                         probs = torch.softmax(logits, dim=1)[0]
+                         pred_class = torch.argmax(probs).item()
+                         confidence = probs[pred_class].item()
+                         pred_label = "Fake" if pred_class == 1 else "Real"
+
+                     # Display results
+                     result_col1, result_col2 = st.columns(2)
+                     with result_col1:
+                         st.metric("Prediction", pred_label)
+                     with result_col2:
+                         st.metric("Confidence", f"{confidence:.2%}")
+
+                     # GradCAM visualization
+                     st.subheader("GradCAM Visualization")
+                     cam, overlay, comparison, detected_face_box = process_image_with_gradcam(
+                         image, model, device, pred_class
+                     )
+
+                     # Display GradCAM results
+                     st.image(comparison, caption="Original | CAM | Overlay", use_column_width=True)
+
+                     # Save results in session state for LLM analysis
+                     st.session_state.current_image = image
+                     st.session_state.current_overlay = overlay
+                     st.session_state.current_face_box = detected_face_box
+                     st.session_state.current_pred_label = pred_label
+                     st.session_state.current_confidence = confidence
+
+                     st.success("✅ Initial detection and GradCAM visualization complete!")
+             else:
+                 st.warning("⚠️ Please load the CLIP model first to perform initial detection.")

+     # LLM Analysis section
+     with st.expander("Stage 3: Detailed Analysis with Vision LLM", expanded=False):
+         if hasattr(st.session_state, 'current_image') and st.session_state.llm_model_loaded:
+             st.subheader("Detailed Deepfake Analysis")
+
+             # Default question with option to customize
+             default_question = f"This image has been classified as {st.session_state.current_pred_label}. Analyze the key features that led to this classification, focusing on the highlighted areas in the GradCAM visualization. Provide both a technical explanation for experts and a simple explanation for non-technical users."
+             question = st.text_area("Question/Prompt:", value=default_question, height=100)
+
+             # Analyze button
+             if st.button("🔍 Perform Detailed Analysis", type="primary"):
+                 result = analyze_image_with_llm(
+                     st.session_state.current_image,
+                     st.session_state.current_overlay,
+                     st.session_state.current_face_box,
+                     st.session_state.current_pred_label,
+                     st.session_state.current_confidence,
+                     question,
+                     st.session_state.llm_model,
                      st.session_state.tokenizer,
                      temperature=temperature,
                      max_tokens=max_tokens,
 
                      # Just display the whole result
                      st.subheader("Analysis Result")
                      st.markdown(result)
+         elif not hasattr(st.session_state, 'current_image'):
+             st.warning("⚠️ Please upload an image and complete the initial detection first.")
          else:
+             st.warning("⚠️ Please load the Vision LLM to perform detailed analysis.")

      # Footer
      st.markdown("---")
+     st.caption("Advanced Deepfake Image Analyzer")

  if __name__ == "__main__":
      main()
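For reference, the Stage 2 verdict computed in main() reduces to a softmax over the two-logit classification head. A minimal sketch of that decision rule, with a fabricated logit tensor used purely for illustration:

# Stage-2 decision rule, isolated; not part of app.py. Logits are made up for illustration.
import torch

logits = torch.tensor([[0.3, 1.9]])        # stand-in for model.classification_head(outputs)
probs = torch.softmax(logits, dim=1)[0]
pred_class = torch.argmax(probs).item()    # index 1 is treated as "Fake" in the app
confidence = probs[pred_class].item()
pred_label = "Fake" if pred_class == 1 else "Real"
print(pred_label, f"{confidence:.2%}")     # prints: Fake 83.20%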