saakshigupta committed on
Commit 3e4a247 · verified · 1 Parent(s): 8b4e8d5

Update app.py

Files changed (1)
  1. app.py +493 -55
app.py CHANGED
@@ -1,11 +1,21 @@
1
  import streamlit as st
2
  import torch
3
  from PIL import Image
4
  import io
5
  from peft import PeftModel
6
  from unsloth import FastVisionModel
7
- import tempfile
8
  import os
9
 
10
  # App title and description
11
  st.set_page_config(
@@ -15,8 +25,8 @@ st.set_page_config(
15
  )
16
 
17
  # Main title and description
18
- st.title("Deepfake Image Analyzer")
19
- st.markdown("Upload an image to analyze it for possible deepfake manipulation")
20
 
21
  # Check for GPU availability
22
  def check_gpu():
@@ -54,26 +64,360 @@ max_tokens = st.sidebar.slider(
54
  # Custom instruction text area in sidebar
55
  custom_instruction = st.sidebar.text_area(
56
  "Custom Instructions (Advanced)",
57
- value="Analyze for facial inconsistencies, lighting irregularities, mismatched shadows, and other signs of manipulation.",
58
- help="Add specific instructions for the model"
59
  )
60
 
61
  # About section in sidebar
62
  st.sidebar.markdown("---")
63
  st.sidebar.subheader("About")
64
  st.sidebar.markdown("""
65
- This analyzer looks for:
66
  - Facial inconsistencies
67
  - Unnatural movements
68
  - Lighting issues
69
  - Texture anomalies
70
  - Edge artifacts
71
  - Blending problems
72
-
73
- **Model**: Fine-tuned Llama 3.2 Vision
74
- **Creator**: [Saakshi Gupta](https://huggingface.co/saakshigupta)
75
  """)
76
 
77
  # Function to fix cross-attention masks
78
  def fix_cross_attention_mask(inputs):
79
  if 'cross_attention_mask' in inputs and 0 in inputs['cross_attention_mask'].shape:
@@ -87,8 +431,8 @@ def fix_cross_attention_mask(inputs):
87
 
88
  # Load model function
89
  @st.cache_resource
90
- def load_model():
91
- with st.spinner("Loading model... This may take a few minutes. Please be patient..."):
92
  try:
93
  # Check for GPU
94
  has_gpu = check_gpu()
@@ -113,17 +457,18 @@ def load_model():
113
  return None, None
114
 
115
  # Analyze image function
116
- def analyze_image(image, question, model, tokenizer, temperature=0.7, max_tokens=500, custom_instruction=""):
117
- # Combine question with custom instruction if provided
118
  if custom_instruction.strip():
119
- full_prompt = f"{question}\n\nAdditional instructions: {custom_instruction}"
120
  else:
121
- full_prompt = question
122
 
123
- # Format the message
124
  messages = [
125
  {"role": "user", "content": [
126
- {"type": "image"},
127
  {"type": "text", "text": full_prompt}
128
  ]}
129
  ]
@@ -133,7 +478,7 @@ def analyze_image(image, question, model, tokenizer, temperature=0.7, max_tokens
133
 
134
  # Process with image
135
  inputs = tokenizer(
136
- image,
137
  input_text,
138
  add_special_tokens=False,
139
  return_tensors="pt",
@@ -143,7 +488,7 @@ def analyze_image(image, question, model, tokenizer, temperature=0.7, max_tokens
143
  inputs = fix_cross_attention_mask(inputs)
144
 
145
  # Generate response
146
- with st.spinner("Analyzing image... (this may take 15-30 seconds)"):
147
  with torch.no_grad():
148
  output_ids = model.generate(
149
  **inputs,
@@ -166,46 +511,137 @@ def analyze_image(image, question, model, tokenizer, temperature=0.7, max_tokens
166
 
167
  # Main app
168
  def main():
169
- # Create a button to load the model
170
- if 'model_loaded' not in st.session_state:
171
- st.session_state.model_loaded = False
172
- st.session_state.model = None
173
  st.session_state.tokenizer = None
174
 
175
- # Load model button
176
- if not st.session_state.model_loaded:
177
- if st.button("📥 Load Deepfake Analysis Model", type="primary"):
178
- model, tokenizer = load_model()
179
- if model is not None and tokenizer is not None:
180
- st.session_state.model = model
181
- st.session_state.tokenizer = tokenizer
182
- st.session_state.model_loaded = True
183
- st.success("✅ Model loaded successfully! You can now analyze images.")
184
  else:
185
- st.error("❌ Failed to load model. Please check the logs for errors.")
186
- else:
187
- st.success("✅ Model loaded successfully! You can now analyze images.")
188
 
189
  # Image upload section
190
- st.subheader("Upload an Image")
191
- uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
192
-
193
- # Default question with option to customize
194
- default_question = "Analyze this image and tell me if it's a deepfake. Provide both technical and non-technical explanations."
195
- question = st.text_area("Question/Prompt:", value=default_question, height=100)
196
 
197
- if uploaded_file is not None:
198
- # Display the uploaded image
199
- image = Image.open(uploaded_file).convert("RGB")
200
- st.image(image, caption="Uploaded Image", use_column_width=True)
201
-
202
- # Analyze button - only enabled if model is loaded
203
- if st.session_state.model_loaded:
204
- if st.button("🔍 Analyze Image", type="primary"):
205
- result = analyze_image(
206
- image,
207
- question,
208
- st.session_state.model,
209
  st.session_state.tokenizer,
210
  temperature=temperature,
211
  max_tokens=max_tokens,
@@ -235,12 +671,14 @@ def main():
235
  # Just display the whole result
236
  st.subheader("Analysis Result")
237
  st.markdown(result)
238
  else:
239
- st.warning("⚠️ Please load the model first before analyzing images.")
240
 
241
  # Footer
242
  st.markdown("---")
243
- st.caption("Deepfake Image Analyzer")
244
 
245
  if __name__ == "__main__":
246
  main()
 
1
  import streamlit as st
2
  import torch
3
+ import torch.nn as nn
4
+ from torch.utils.data import DataLoader
5
+ from torchvision import transforms
6
+ from transformers import CLIPModel
7
  from PIL import Image
8
+ import numpy as np
9
  import io
10
+ import base64
11
+ import cv2
12
+ import matplotlib.pyplot as plt
13
  from peft import PeftModel
14
  from unsloth import FastVisionModel
15
  import os
16
+ import tempfile
17
+ import warnings
18
+ warnings.filterwarnings("ignore", category=UserWarning)
19
 
20
  # App title and description
21
  st.set_page_config(
 
25
  )
26
 
27
  # Main title and description
28
+ st.title("Advanced Deepfake Image Analyzer")
29
+ st.markdown("Analyze images for deepfake manipulation with multi-stage analysis")
30
 
31
  # Check for GPU availability
32
  def check_gpu():
 
64
  # Custom instruction text area in sidebar
65
  custom_instruction = st.sidebar.text_area(
66
  "Custom Instructions (Advanced)",
67
+ value="Focus on analyzing the highlighted regions from the GradCAM visualization. Examine facial inconsistencies, lighting irregularities, and other artifacts visible in the heat map.",
68
+ help="Add specific instructions for the LLM analysis"
69
  )
70
 
71
  # About section in sidebar
72
  st.sidebar.markdown("---")
73
  st.sidebar.subheader("About")
74
  st.sidebar.markdown("""
75
+ This analyzer performs multi-stage detection:
76
+ 1. **Initial Detection**: CLIP-based classifier
77
+ 2. **GradCAM Visualization**: Highlights suspicious regions
78
+ 3. **LLM Analysis**: Fine-tuned Llama 3.2 Vision provides detailed explanations
79
+
80
+ The system looks for:
81
  - Facial inconsistencies
82
  - Unnatural movements
83
  - Lighting issues
84
  - Texture anomalies
85
  - Edge artifacts
86
  - Blending problems
87
  """)
88
 
89
+ # ----- GradCAM Implementation -----
90
+
91
+ class ImageDataset(torch.utils.data.Dataset):
92
+ def __init__(self, image, transform=None, face_only=True, dataset_name=None):
93
+ self.image = image
94
+ self.transform = transform
95
+ self.face_only = face_only
96
+ self.dataset_name = dataset_name
97
+ # Load face detector
98
+ self.face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
99
+
100
+ def __len__(self):
101
+ return 1 # Only one image
102
+
103
+ def detect_face(self, image_np):
104
+ """Detect face in image and return the face region"""
105
+ gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
106
+ faces = self.face_detector.detectMultiScale(gray, 1.1, 5)
107
+
108
+ # If no face is detected, use the whole image
109
+ if len(faces) == 0:
110
+ st.info("No face detected, using whole image for analysis")
111
+ h, w = image_np.shape[:2]
112
+ return (0, 0, w, h), image_np
113
+
114
+ # Get the largest face
115
+ if len(faces) > 1:
116
+ # Choose the largest face by area
117
+ areas = [w*h for (x, y, w, h) in faces]
118
+ largest_idx = np.argmax(areas)
119
+ x, y, w, h = faces[largest_idx]
120
+ else:
121
+ x, y, w, h = faces[0]
122
+
123
+ # Add padding around the face (5% on each side)
124
+ padding_x = int(w * 0.05)
125
+ padding_y = int(h * 0.05)
126
+
127
+ # Ensure padding doesn't go outside image bounds
128
+ x1 = max(0, x - padding_x)
129
+ y1 = max(0, y - padding_y)
130
+ x2 = min(image_np.shape[1], x + w + padding_x)
131
+ y2 = min(image_np.shape[0], y + h + padding_y)
132
+
133
+ # Extract the face region
134
+ face_img = image_np[y1:y2, x1:x2]
135
+
136
+ return (x1, y1, x2-x1, y2-y1), face_img
137
+
138
+ def __getitem__(self, idx):
139
+ image_np = np.array(self.image)
140
+ label = 0 # Default label; will be overridden by prediction
141
+
142
+ # Store original image for visualization
143
+ original_image = self.image.copy()
144
+
145
+ # Detect face if required
146
+ if self.face_only:
147
+ face_box, face_img_np = self.detect_face(image_np)
148
+ face_img = Image.fromarray(face_img_np)
149
+
150
+ # Apply transform to face image
151
+ if self.transform:
152
+ face_tensor = self.transform(face_img)
153
+ else:
154
+ face_tensor = transforms.ToTensor()(face_img)
155
+
156
+ return face_tensor, label, "uploaded_image", original_image, face_box, self.dataset_name
157
+ else:
158
+ # Process the whole image
159
+ if self.transform:
160
+ image_tensor = self.transform(self.image)
161
+ else:
162
+ image_tensor = transforms.ToTensor()(self.image)
163
+
164
+ return image_tensor, label, "uploaded_image", original_image, None, self.dataset_name
165
+
166
+ class GradCAM:
167
+ def __init__(self, model, target_layer):
168
+ self.model = model
169
+ self.target_layer = target_layer
170
+ self.gradients = None
171
+ self.activations = None
172
+ self._register_hooks()
173
+
174
+ def _register_hooks(self):
175
+ def forward_hook(module, input, output):
176
+ if isinstance(output, tuple):
177
+ self.activations = output[0]
178
+ else:
179
+ self.activations = output
180
+
181
+ def backward_hook(module, grad_in, grad_out):
182
+ if isinstance(grad_out, tuple):
183
+ self.gradients = grad_out[0]
184
+ else:
185
+ self.gradients = grad_out
186
+
187
+ layer = dict([*self.model.named_modules()])[self.target_layer]
188
+ layer.register_forward_hook(forward_hook)
189
+ layer.register_backward_hook(backward_hook)
190
+
191
+ def generate(self, input_tensor, class_idx):
192
+ self.model.zero_grad()
193
+
194
+ try:
195
+ # Use only the vision part of the model for gradient calculation
196
+ vision_outputs = self.model.vision_model(pixel_values=input_tensor)
197
+
198
+ # Get the pooler output
199
+ features = vision_outputs.pooler_output
200
+
201
+ # Create a dummy gradient for the feature based on the class idx
202
+ one_hot = torch.zeros_like(features)
203
+ one_hot[0, class_idx] = 1
204
+
205
+ # Manually backpropagate
206
+ features.backward(gradient=one_hot)
207
+
208
+ # Check for None values
209
+ if self.gradients is None or self.activations is None:
210
+ st.warning("Warning: Gradients or activations are None. Using fallback CAM.")
211
+ return np.ones((14, 14), dtype=np.float32) * 0.5
212
+
213
+ # Process gradients and activations for transformer-based model
214
+ gradients = self.gradients.cpu().detach().numpy()
215
+ activations = self.activations.cpu().detach().numpy()
216
+
217
+ if len(activations.shape) == 3: # [batch, sequence_length, hidden_dim]
218
+ seq_len = activations.shape[1]
219
+
220
+ # CLIP ViT typically has 196 patch tokens (14×14) + 1 class token = 197
221
+ if seq_len >= 197:
222
+ # Skip the class token (first token) and reshape the patch tokens into a square
223
+ patch_tokens = activations[0, 1:197, :] # Remove the class token
224
+ # Take the mean across the hidden dimension
225
+ token_importance = np.mean(np.abs(patch_tokens), axis=1)
226
+ # Reshape to the expected grid size (14×14 for CLIP ViT)
227
+ cam = token_importance.reshape(14, 14)
228
+ else:
229
+ # Try to find factors close to a square
230
+ side_len = int(np.sqrt(seq_len))
231
+ # Use the mean across features as importance
232
+ token_importance = np.mean(np.abs(activations[0]), axis=1)
233
+ # Create as square-like shape as possible
234
+ cam = np.zeros((side_len, side_len))
235
+ # Fill the cam with available values
236
+ flat_cam = cam.flatten()
237
+ flat_cam[:min(len(token_importance), len(flat_cam))] = token_importance[:min(len(token_importance), len(flat_cam))]
238
+ cam = flat_cam.reshape(side_len, side_len)
239
+ else:
240
+ # Fallback
241
+ st.info("Using fallback CAM shape (14x14)")
242
+ cam = np.ones((14, 14), dtype=np.float32) * 0.5 # Default fallback
243
+
244
+ # Ensure we have valid values
245
+ cam = np.maximum(cam, 0)
246
+ if np.max(cam) > 0:
247
+ cam = cam / np.max(cam)
248
+
249
+ return cam
250
+
251
+ except Exception as e:
252
+ st.error(f"Error in GradCAM.generate: {str(e)}")
253
+ return np.ones((14, 14), dtype=np.float32) * 0.5
254
+
255
+ def overlay_cam_on_image(image, cam, face_box=None, alpha=0.5):
256
+ """Overlay the CAM on the image"""
257
+ if face_box is not None:
258
+ x, y, w, h = face_box
259
+ # Create a mask for the entire image (all zeros initially)
260
+ img_np = np.array(image)
261
+ full_h, full_w = img_np.shape[:2]
262
+ full_cam = np.zeros((full_h, full_w), dtype=np.float32)
263
+
264
+ # Resize CAM to match face region
265
+ face_cam = cv2.resize(cam, (w, h))
266
+
267
+ # Copy the face CAM into the full image CAM at the face position
268
+ full_cam[y:y+h, x:x+w] = face_cam
269
+
270
+ # Convert full CAM to image
271
+ cam_resized = Image.fromarray((full_cam * 255).astype(np.uint8))
272
+ cam_colormap = plt.cm.jet(np.array(cam_resized) / 255.0)[:, :, :3] # Apply colormap
273
+ cam_colormap = (cam_colormap * 255).astype(np.uint8)
274
+ else:
275
+ # Resize CAM to match image dimensions
276
+ img_np = np.array(image)
277
+ h, w = img_np.shape[:2]
278
+ cam_resized = cv2.resize(cam, (w, h))
279
+
280
+ # Apply colormap
281
+ cam_colormap = plt.cm.jet(cam_resized)[:, :, :3] # Apply colormap
282
+ cam_colormap = (cam_colormap * 255).astype(np.uint8)
283
+
284
+ # Blend the original image with the colormap
285
+ img_np_float = img_np.astype(float) / 255.0
286
+ cam_colormap_float = cam_colormap.astype(float) / 255.0
287
+
288
+ blended = img_np_float * (1 - alpha) + cam_colormap_float * alpha
289
+ blended = (blended * 255).astype(np.uint8)
290
+
291
+ return Image.fromarray(blended)
292
+
293
+ def save_comparison(image, cam, overlay, face_box=None):
294
+ """Create a side-by-side comparison of the original, CAM, and overlay"""
295
+ fig, axes = plt.subplots(1, 3, figsize=(15, 5))
296
+
297
+ # Original Image
298
+ axes[0].imshow(image)
299
+ axes[0].set_title("Original")
300
+ if face_box is not None:
301
+ x, y, w, h = face_box
302
+ rect = plt.Rectangle((x, y), w, h, edgecolor='lime', linewidth=2, fill=False)
303
+ axes[0].add_patch(rect)
304
+ axes[0].axis("off")
305
+
306
+ # CAM
307
+ if face_box is not None:
308
+ # Create a full image CAM that highlights only the face
309
+ img_np = np.array(image)
310
+ h, w = img_np.shape[:2]
311
+ full_cam = np.zeros((h, w))
312
+
313
+ x, y, fw, fh = face_box
314
+ # Resize CAM to face size
315
+ face_cam = cv2.resize(cam, (fw, fh))
316
+ # Place it in the right position
317
+ full_cam[y:y+fh, x:x+fw] = face_cam
318
+ axes[1].imshow(full_cam, cmap="jet")
319
+ else:
320
+ cam_resized = cv2.resize(cam, (image.width, image.height))
321
+ axes[1].imshow(cam_resized, cmap="jet")
322
+ axes[1].set_title("CAM")
323
+ axes[1].axis("off")
324
+
325
+ # Overlay
326
+ axes[2].imshow(overlay)
327
+ axes[2].set_title("Overlay")
328
+ axes[2].axis("off")
329
+
330
+ plt.tight_layout()
331
+
332
+ # Convert plot to PIL Image for Streamlit display
333
+ buf = io.BytesIO()
334
+ plt.savefig(buf, format="png", bbox_inches="tight")
335
+ plt.close()
336
+ buf.seek(0)
337
+ return Image.open(buf)
338
+
339
+ # Function to load GradCAM CLIP model
340
+ @st.cache_resource
341
+ def load_clip_model():
342
+ with st.spinner("Loading CLIP model for GradCAM..."):
343
+ model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
344
+
345
+ # Apply a simple classification head
346
+ model.classification_head = nn.Linear(1024, 2)
347
+ model.classification_head.weight.data.normal_(mean=0.0, std=0.02)
348
+ model.classification_head.bias.data.zero_()
349
+
350
+ model.eval()
351
+ return model
352
+
353
+ def get_target_layer_clip(model):
354
+ """Get the target layer for GradCAM"""
355
+ return "vision_model.encoder.layers.23"
356
+
357
+ def process_image_with_gradcam(image, model, device, pred_class):
358
+ """Process an image with GradCAM"""
359
+ # Set up transformations
360
+ transform = transforms.Compose([
361
+ transforms.Resize((224, 224)),
362
+ transforms.ToTensor(),
363
+ transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
364
+ ])
365
+
366
+ # Create dataset for the single image
367
+ dataset = ImageDataset(image, transform=transform, face_only=True)
368
+
369
+ # Custom collate function
370
+ def custom_collate(batch):
371
+ tensors = [item[0] for item in batch]
372
+ labels = [item[1] for item in batch]
373
+ paths = [item[2] for item in batch]
374
+ images = [item[3] for item in batch]
375
+ face_boxes = [item[4] for item in batch]
376
+ dataset_names = [item[5] for item in batch]
377
+
378
+ tensors = torch.stack(tensors)
379
+ labels = torch.tensor(labels)
380
+
381
+ return tensors, labels, paths, images, face_boxes, dataset_names
382
+
383
+ # Create dataloader
384
+ dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=custom_collate)
385
+
386
+ # Extract the batch
387
+ for batch in dataloader:
388
+ input_tensor, label, img_paths, original_images, face_boxes, dataset_names = batch
389
+ original_image = original_images[0]
390
+ face_box = face_boxes[0]
391
+
392
+ # Move tensors and model to device
393
+ input_tensor = input_tensor.to(device)
394
+ model = model.to(device)
395
+
396
+ try:
397
+ # Create GradCAM extractor
398
+ target_layer = get_target_layer_clip(model)
399
+ cam_extractor = GradCAM(model, target_layer)
400
+
401
+ # Generate CAM
402
+ cam = cam_extractor.generate(input_tensor, pred_class)
403
+
404
+ # Create visualizations
405
+ overlay = overlay_cam_on_image(original_image, cam, face_box)
406
+ comparison = save_comparison(original_image, cam, overlay, face_box)
407
+
408
+ # Return results
409
+ return cam, overlay, comparison, face_box
410
+
411
+ except Exception as e:
412
+ st.error(f"Error processing image with GradCAM: {str(e)}")
413
+ # Return default values
414
+ default_cam = np.ones((14, 14), dtype=np.float32) * 0.5
415
+ overlay = overlay_cam_on_image(original_image, default_cam, face_box)
416
+ comparison = save_comparison(original_image, default_cam, overlay, face_box)
417
+ return default_cam, overlay, comparison, face_box
418
+
419
+ # ----- Fine-tuned Vision LLM -----
420
+
421
  # Function to fix cross-attention masks
422
  def fix_cross_attention_mask(inputs):
423
  if 'cross_attention_mask' in inputs and 0 in inputs['cross_attention_mask'].shape:
 
431
 
432
  # Load model function
433
  @st.cache_resource
434
+ def load_llm_model():
435
+ with st.spinner("Loading LLM vision model... This may take a few minutes. Please be patient..."):
436
  try:
437
  # Check for GPU
438
  has_gpu = check_gpu()
 
457
  return None, None
458
 
459
  # Analyze image function
460
+ def analyze_image_with_llm(image, gradcam_overlay, face_box, pred_label, confidence, question, model, tokenizer, temperature=0.7, max_tokens=500, custom_instruction=""):
461
+ # Create a prompt that includes GradCAM information
462
  if custom_instruction.strip():
463
+ full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious.\n\n{custom_instruction}"
464
  else:
465
+ full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious."
466
 
467
+ # Format the message to include both the original image and the GradCAM visualization
468
  messages = [
469
  {"role": "user", "content": [
470
+ {"type": "image", "image": image}, # Original image
471
+ {"type": "image", "image": gradcam_overlay}, # GradCAM overlay
472
  {"type": "text", "text": full_prompt}
473
  ]}
474
  ]
 
478
 
479
  # Process with image
480
  inputs = tokenizer(
481
+ [image, gradcam_overlay], # Send both images
482
  input_text,
483
  add_special_tokens=False,
484
  return_tensors="pt",
 
488
  inputs = fix_cross_attention_mask(inputs)
489
 
490
  # Generate response
491
+ with st.spinner("Generating detailed analysis... (this may take 15-30 seconds)"):
492
  with torch.no_grad():
493
  output_ids = model.generate(
494
  **inputs,
 
511
 
512
  # Main app
513
  def main():
514
+ # Create placeholders for model state
515
+ if 'clip_model_loaded' not in st.session_state:
516
+ st.session_state.clip_model_loaded = False
517
+ st.session_state.clip_model = None
518
+
519
+ if 'llm_model_loaded' not in st.session_state:
520
+ st.session_state.llm_model_loaded = False
521
+ st.session_state.llm_model = None
522
  st.session_state.tokenizer = None
523
 
524
+ # Create expanders for each stage
525
+ with st.expander("Stage 1: Model Loading", expanded=True):
526
+ # Button for loading CLIP model
527
+ clip_col, llm_col = st.columns(2)
528
+
529
+ with clip_col:
530
+ if not st.session_state.clip_model_loaded:
531
+ if st.button("📥 Load CLIP Model for Detection", type="primary"):
532
+ # Load CLIP model
533
+ model = load_clip_model()
534
+ if model is not None:
535
+ st.session_state.clip_model = model
536
+ st.session_state.clip_model_loaded = True
537
+ st.success("✅ CLIP model loaded successfully!")
538
+ else:
539
+ st.error("❌ Failed to load CLIP model.")
540
  else:
541
+ st.success("✅ CLIP model loaded and ready!")
542
+
543
+ with llm_col:
544
+ if not st.session_state.llm_model_loaded:
545
+ if st.button("📥 Load Vision LLM for Analysis", type="primary"):
546
+ # Load LLM model
547
+ model, tokenizer = load_llm_model()
548
+ if model is not None and tokenizer is not None:
549
+ st.session_state.llm_model = model
550
+ st.session_state.tokenizer = tokenizer
551
+ st.session_state.llm_model_loaded = True
552
+ st.success("✅ Vision LLM loaded successfully!")
553
+ else:
554
+ st.error("❌ Failed to load Vision LLM.")
555
+ else:
556
+ st.success("✅ Vision LLM loaded and ready!")
557
 
558
  # Image upload section
559
+ with st.expander("Stage 2: Image Upload & Initial Detection", expanded=True):
560
+ st.subheader("Upload an Image")
561
+ uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
562
+
563
+ if uploaded_file is not None:
564
+ # Display the uploaded image
565
+ image = Image.open(uploaded_file).convert("RGB")
566
+ st.image(image, caption="Uploaded Image", use_column_width=True)
567
+
568
+ # Detect with CLIP model if loaded
569
+ if st.session_state.clip_model_loaded:
570
+ with st.spinner("Analyzing image with CLIP model..."):
571
+ # Preprocess image for CLIP
572
+ transform = transforms.Compose([
573
+ transforms.Resize((224, 224)),
574
+ transforms.ToTensor(),
575
+ transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
576
+ ])
577
+
578
+ # Create a simple dataset for the image
579
+ dataset = ImageDataset(image, transform=transform, face_only=True)
580
+ tensor, _, _, _, face_box, _ = dataset[0]
581
+ tensor = tensor.unsqueeze(0)
582
+
583
+ # Get device
584
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
585
+
586
+ # Move model and tensor to device
587
+ model = st.session_state.clip_model.to(device)
588
+ tensor = tensor.to(device)
589
+
590
+ # Forward pass
591
+ with torch.no_grad():
592
+ outputs = model.vision_model(pixel_values=tensor).pooler_output
593
+ logits = model.classification_head(outputs)
594
+ probs = torch.softmax(logits, dim=1)[0]
595
+ pred_class = torch.argmax(probs).item()
596
+ confidence = probs[pred_class].item()
597
+ pred_label = "Fake" if pred_class == 1 else "Real"
598
+
599
+ # Display results
600
+ result_col1, result_col2 = st.columns(2)
601
+ with result_col1:
602
+ st.metric("Prediction", pred_label)
603
+ with result_col2:
604
+ st.metric("Confidence", f"{confidence:.2%}")
605
+
606
+ # GradCAM visualization
607
+ st.subheader("GradCAM Visualization")
608
+ cam, overlay, comparison, detected_face_box = process_image_with_gradcam(
609
+ image, model, device, pred_class
610
+ )
611
+
612
+ # Display GradCAM results
613
+ st.image(comparison, caption="Original | CAM | Overlay", use_column_width=True)
614
+
615
+ # Save results in session state for LLM analysis
616
+ st.session_state.current_image = image
617
+ st.session_state.current_overlay = overlay
618
+ st.session_state.current_face_box = detected_face_box
619
+ st.session_state.current_pred_label = pred_label
620
+ st.session_state.current_confidence = confidence
621
+
622
+ st.success("✅ Initial detection and GradCAM visualization complete!")
623
+ else:
624
+ st.warning("⚠️ Please load the CLIP model first to perform initial detection.")
625
 
626
+ # LLM Analysis section
627
+ with st.expander("Stage 3: Detailed Analysis with Vision LLM", expanded=False):
628
+ if hasattr(st.session_state, 'current_image') and st.session_state.llm_model_loaded:
629
+ st.subheader("Detailed Deepfake Analysis")
630
+
631
+ # Default question with option to customize
632
+ default_question = f"This image has been classified as {st.session_state.current_pred_label}. Analyze the key features that led to this classification, focusing on the highlighted areas in the GradCAM visualization. Provide both a technical explanation for experts and a simple explanation for non-technical users."
633
+ question = st.text_area("Question/Prompt:", value=default_question, height=100)
634
+
635
+ # Analyze button
636
+ if st.button("🔍 Perform Detailed Analysis", type="primary"):
637
+ result = analyze_image_with_llm(
638
+ st.session_state.current_image,
639
+ st.session_state.current_overlay,
640
+ st.session_state.current_face_box,
641
+ st.session_state.current_pred_label,
642
+ st.session_state.current_confidence,
643
+ question,
644
+ st.session_state.llm_model,
645
  st.session_state.tokenizer,
646
  temperature=temperature,
647
  max_tokens=max_tokens,
 
671
  # Just display the whole result
672
  st.subheader("Analysis Result")
673
  st.markdown(result)
674
+ elif not hasattr(st.session_state, 'current_image'):
675
+ st.warning("⚠️ Please upload an image and complete the initial detection first.")
676
  else:
677
+ st.warning("⚠️ Please load the Vision LLM to perform detailed analysis.")
678
 
679
  # Footer
680
  st.markdown("---")
681
+ st.caption("Advanced Deepfake Image Analyzer")
682
 
683
  if __name__ == "__main__":
684
  main()