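"""Streamlit app for multi-stage deepfake image analysis: a CLIP-based
detector, GradCAM visualization of suspicious regions, and a fine-tuned
Llama 3.2 Vision model that explains the result."""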
import streamlit as st
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from transformers import CLIPModel
from PIL import Image
import numpy as np
import io
import base64
import cv2
import matplotlib.pyplot as plt
from peft import PeftModel
from unsloth import FastVisionModel
import os
import tempfile
import warnings
warnings.filterwarnings("ignore", category=UserWarning)

# App title and description
st.set_page_config(
    page_title="Deepfake Analyzer", 
    layout="wide",
    page_icon="🔍"
)

# Main title and description
st.title("Advanced Deepfake Image Analyzer")
st.markdown("Analyze images for deepfake manipulation with multi-stage analysis")

# Check for GPU availability
def check_gpu():
    if torch.cuda.is_available():
        gpu_info = torch.cuda.get_device_properties(0)
        st.sidebar.success(f"βœ… GPU available: {gpu_info.name} ({gpu_info.total_memory / (1024**3):.2f} GB)")
        return True
    else:
        st.sidebar.warning("⚠️ No GPU detected. Analysis will be slower.")
        return False

# Sidebar components
st.sidebar.title("Options")

# Temperature slider
temperature = st.sidebar.slider(
    "Temperature", 
    min_value=0.1, 
    max_value=1.0, 
    value=0.7, 
    step=0.1,
    help="Higher values make output more random, lower values more deterministic"
)

# Max response length slider
max_tokens = st.sidebar.slider(
    "Maximum Response Length", 
    min_value=100, 
    max_value=1000, 
    value=500, 
    step=50,
    help="The maximum number of tokens in the response"
)

# Custom instruction text area in sidebar
custom_instruction = st.sidebar.text_area(
    "Custom Instructions (Advanced)",
    value="Focus on analyzing the highlighted regions from the GradCAM visualization. Examine facial inconsistencies, lighting irregularities, and other artifacts visible in the heat map.",
    help="Add specific instructions for the LLM analysis"
)

# About section in sidebar
st.sidebar.markdown("---")
st.sidebar.subheader("About")
st.sidebar.markdown("""
This analyzer performs multi-stage detection:
1. **Initial Detection**: CLIP-based classifier
2. **GradCAM Visualization**: Highlights suspicious regions
3. **LLM Analysis**: Fine-tuned Llama 3.2 Vision provides detailed explanations

The system looks for:
- Facial inconsistencies
- Unnatural movements
- Lighting issues
- Texture anomalies
- Edge artifacts
- Blending problems
""")

# ----- GradCAM Implementation -----

class ImageDataset(torch.utils.data.Dataset):
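    """Dataset wrapper for a single PIL image.

    If face_only is True, the largest face found by a Haar cascade is
    cropped (with 5% padding) before the transform is applied; otherwise
    the whole image is transformed directly.
    """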
    def __init__(self, image, transform=None, face_only=True, dataset_name=None):
        self.image = image
        self.transform = transform
        self.face_only = face_only
        self.dataset_name = dataset_name
        # Load face detector
        self.face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        
    def __len__(self):
        return 1  # Only one image

    def detect_face(self, image_np):
        """Detect face in image and return the face region"""
        gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
        faces = self.face_detector.detectMultiScale(gray, 1.1, 5)
        
        # If no face is detected, use the whole image
        if len(faces) == 0:
            st.info("No face detected, using whole image for analysis")
            h, w = image_np.shape[:2]
            return (0, 0, w, h), image_np
            
        # Get the largest face
        if len(faces) > 1:
            # Choose the largest face by area
            areas = [w*h for (x, y, w, h) in faces]
            largest_idx = np.argmax(areas)
            x, y, w, h = faces[largest_idx]
        else:
            x, y, w, h = faces[0]
            
        # Add padding around the face (5% on each side)
        padding_x = int(w * 0.05)
        padding_y = int(h * 0.05)
        
        # Ensure padding doesn't go outside image bounds
        x1 = max(0, x - padding_x)
        y1 = max(0, y - padding_y)
        x2 = min(image_np.shape[1], x + w + padding_x)
        y2 = min(image_np.shape[0], y + h + padding_y)
        
        # Extract the face region
        face_img = image_np[y1:y2, x1:x2]
        
        return (x1, y1, x2-x1, y2-y1), face_img

    def __getitem__(self, idx):
        image_np = np.array(self.image)
        label = 0  # Default label; will be overridden by prediction
        
        # Store original image for visualization
        original_image = self.image.copy()
        
        # Detect face if required
        if self.face_only:
            face_box, face_img_np = self.detect_face(image_np)
            face_img = Image.fromarray(face_img_np)
            
            # Apply transform to face image
            if self.transform:
                face_tensor = self.transform(face_img)
            else:
                face_tensor = transforms.ToTensor()(face_img)
            
            return face_tensor, label, "uploaded_image", original_image, face_box, self.dataset_name
        else:
            # Process the whole image
            if self.transform:
                image_tensor = self.transform(self.image)
            else:
                image_tensor = transforms.ToTensor()(self.image)
                
            return image_tensor, label, "uploaded_image", original_image, None, self.dataset_name

class GradCAM:
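    """Grad-CAM helper for the CLIP vision tower.

    Hooks a target transformer layer to capture activations and gradients,
    then builds a 14x14 saliency map from the patch-token activations.
    """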
    def __init__(self, model, target_layer):
        self.model = model
        self.target_layer = target_layer
        self.gradients = None
        self.activations = None
        self._register_hooks()

    def _register_hooks(self):
        def forward_hook(module, input, output):
            if isinstance(output, tuple):
                self.activations = output[0]
            else:
                self.activations = output

        def backward_hook(module, grad_in, grad_out):
            if isinstance(grad_out, tuple):
                self.gradients = grad_out[0]
            else:
                self.gradients = grad_out

        layer = dict([*self.model.named_modules()])[self.target_layer]
        layer.register_forward_hook(forward_hook)
        # register_full_backward_hook replaces the deprecated register_backward_hook
        layer.register_full_backward_hook(backward_hook)

    def generate(self, input_tensor, class_idx):
        self.model.zero_grad()
        
        try:
            # Use only the vision part of the model for gradient calculation
            vision_outputs = self.model.vision_model(pixel_values=input_tensor)
            
            # Get the pooler output
            features = vision_outputs.pooler_output
            
            # Create a dummy gradient for the feature based on the class idx
            one_hot = torch.zeros_like(features)
            one_hot[0, class_idx] = 1
            
            # Manually backpropagate
            features.backward(gradient=one_hot)

            # Check for None values
            if self.gradients is None or self.activations is None:
                st.warning("Warning: Gradients or activations are None. Using fallback CAM.")
                return np.ones((14, 14), dtype=np.float32) * 0.5

            # Process gradients and activations for transformer-based model
            gradients = self.gradients.cpu().detach().numpy()
            activations = self.activations.cpu().detach().numpy()
            
            if len(activations.shape) == 3:  # [batch, sequence_length, hidden_dim]
                seq_len = activations.shape[1]
                
                # CLIP ViT typically has 196 patch tokens (14×14) + 1 class token = 197
                if seq_len >= 197:
                    # Skip the class token (first token) and reshape the patch tokens into a square
                    patch_tokens = activations[0, 1:197, :]  # Remove the class token
                    # Take the mean across the hidden dimension
                    token_importance = np.mean(np.abs(patch_tokens), axis=1)
                    # Reshape to the expected grid size (14×14 for CLIP ViT)
                    cam = token_importance.reshape(14, 14)
                else:
                    # Try to find factors close to a square
                    side_len = int(np.sqrt(seq_len))
                    # Use the mean across features as importance
                    token_importance = np.mean(np.abs(activations[0]), axis=1)
                    # Create as square-like shape as possible
                    cam = np.zeros((side_len, side_len))
                    # Fill the cam with available values
                    flat_cam = cam.flatten()
                    flat_cam[:min(len(token_importance), len(flat_cam))] = token_importance[:min(len(token_importance), len(flat_cam))]
                    cam = flat_cam.reshape(side_len, side_len)
            else:
                # Fallback
                st.info("Using fallback CAM shape (14x14)")
                cam = np.ones((14, 14), dtype=np.float32) * 0.5  # Default fallback

            # Ensure we have valid values
            cam = np.maximum(cam, 0)
            if np.max(cam) > 0:
                cam = cam / np.max(cam)
            
            return cam
            
        except Exception as e:
            st.error(f"Error in GradCAM.generate: {str(e)}")
            return np.ones((14, 14), dtype=np.float32) * 0.5

def overlay_cam_on_image(image, cam, face_box=None, alpha=0.5):
    """Overlay the CAM on the image"""
    if face_box is not None:
        x, y, w, h = face_box
        # Create a mask for the entire image (all zeros initially)
        img_np = np.array(image)
        full_h, full_w = img_np.shape[:2]
        full_cam = np.zeros((full_h, full_w), dtype=np.float32)
        
        # Resize CAM to match face region
        face_cam = cv2.resize(cam, (w, h))
        
        # Copy the face CAM into the full image CAM at the face position
        full_cam[y:y+h, x:x+w] = face_cam
        
        # Convert full CAM to image
        cam_resized = Image.fromarray((full_cam * 255).astype(np.uint8))
        cam_colormap = plt.cm.jet(np.array(cam_resized) / 255.0)[:, :, :3]  # Apply colormap
        cam_colormap = (cam_colormap * 255).astype(np.uint8)
    else:
        # Resize CAM to match image dimensions
        img_np = np.array(image)
        h, w = img_np.shape[:2]
        cam_resized = cv2.resize(cam, (w, h))
        
        # Apply colormap
        cam_colormap = plt.cm.jet(cam_resized)[:, :, :3]  # Apply colormap
        cam_colormap = (cam_colormap * 255).astype(np.uint8)

    # Blend the original image with the colormap
    img_np_float = img_np.astype(float) / 255.0
    cam_colormap_float = cam_colormap.astype(float) / 255.0
    
    blended = img_np_float * (1 - alpha) + cam_colormap_float * alpha
    blended = (blended * 255).astype(np.uint8)
    
    return Image.fromarray(blended)

def save_comparison(image, cam, overlay, face_box=None):
    """Create a side-by-side comparison of the original, CAM, and overlay"""
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))

    # Original Image
    axes[0].imshow(image)
    axes[0].set_title("Original")
    if face_box is not None:
        x, y, w, h = face_box
        rect = plt.Rectangle((x, y), w, h, edgecolor='lime', linewidth=2, fill=False)
        axes[0].add_patch(rect)
    axes[0].axis("off")

    # CAM
    if face_box is not None:
        # Create a full image CAM that highlights only the face
        img_np = np.array(image)
        h, w = img_np.shape[:2]
        full_cam = np.zeros((h, w))
        
        x, y, fw, fh = face_box
        # Resize CAM to face size
        face_cam = cv2.resize(cam, (fw, fh))
        # Place it in the right position
        full_cam[y:y+fh, x:x+fw] = face_cam
        axes[1].imshow(full_cam, cmap="jet")
    else:
        cam_resized = cv2.resize(cam, (image.width, image.height))
        axes[1].imshow(cam_resized, cmap="jet")
    axes[1].set_title("CAM")
    axes[1].axis("off")

    # Overlay
    axes[2].imshow(overlay)
    axes[2].set_title("Overlay")
    axes[2].axis("off")

    plt.tight_layout()
    
    # Convert plot to PIL Image for Streamlit display
    buf = io.BytesIO()
    plt.savefig(buf, format="png", bbox_inches="tight")
    plt.close()
    buf.seek(0)
    return Image.open(buf)

# Function to load GradCAM CLIP model
@st.cache_resource
def load_clip_model():
    with st.spinner("Loading CLIP model for GradCAM..."):
        model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
        
        # Apply a simple classification head
        model.classification_head = nn.Linear(1024, 2)
        model.classification_head.weight.data.normal_(mean=0.0, std=0.02)
        model.classification_head.bias.data.zero_()
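        # Note: the head above is freshly initialized; this file does not load
        # trained classifier weights, so the Real/Fake prediction is only
        # meaningful if such weights are loaded elsewhere.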
        
        model.eval()
        return model

def get_target_layer_clip(model):
    """Get the target layer for GradCAM"""
    return "vision_model.encoder.layers.23"

def process_image_with_gradcam(image, model, device, pred_class):
    """Process an image with GradCAM"""
    # Set up transformations
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
    ])
    
    # Create dataset for the single image
    dataset = ImageDataset(image, transform=transform, face_only=True)
    
    # Custom collate function
    def custom_collate(batch):
        tensors = [item[0] for item in batch]
        labels = [item[1] for item in batch]
        paths = [item[2] for item in batch]
        images = [item[3] for item in batch]
        face_boxes = [item[4] for item in batch]
        dataset_names = [item[5] for item in batch]
        
        tensors = torch.stack(tensors)
        labels = torch.tensor(labels)
        
        return tensors, labels, paths, images, face_boxes, dataset_names
    
    # Create dataloader
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=custom_collate)
    
    # Extract the batch
    for batch in dataloader:
        input_tensor, label, img_paths, original_images, face_boxes, dataset_names = batch
        original_image = original_images[0]
        face_box = face_boxes[0]
        
        # Move tensors and model to device
        input_tensor = input_tensor.to(device)
        model = model.to(device)
        
        try:
            # Create GradCAM extractor
            target_layer = get_target_layer_clip(model)
            cam_extractor = GradCAM(model, target_layer)
            
            # Generate CAM
            cam = cam_extractor.generate(input_tensor, pred_class)
            
            # Create visualizations
            overlay = overlay_cam_on_image(original_image, cam, face_box)
            comparison = save_comparison(original_image, cam, overlay, face_box)
            
            # Return results
            return cam, overlay, comparison, face_box
            
        except Exception as e:
            st.error(f"Error processing image with GradCAM: {str(e)}")
            # Return default values
            default_cam = np.ones((14, 14), dtype=np.float32) * 0.5
            overlay = overlay_cam_on_image(original_image, default_cam, face_box)
            comparison = save_comparison(original_image, default_cam, overlay, face_box)
            return default_cam, overlay, comparison, face_box

# ----- Fine-tuned Vision LLM -----

# Function to fix cross-attention masks
def fix_cross_attention_mask(inputs):
    if 'cross_attention_mask' in inputs and 0 in inputs['cross_attention_mask'].shape:
        batch_size, seq_len, _, num_tiles = inputs['cross_attention_mask'].shape
        visual_features = 6404  # Critical dimension
        new_mask = torch.ones((batch_size, seq_len, visual_features, num_tiles),
                            device=inputs['cross_attention_mask'].device)
        inputs['cross_attention_mask'] = new_mask
        st.success("Fixed cross-attention mask dimensions")
    return inputs

# Load model function
@st.cache_resource
def load_llm_model():
    with st.spinner("Loading LLM vision model... This may take a few minutes. Please be patient..."):
        try:
            # Check for GPU
            has_gpu = check_gpu()
            
            # Load base model and tokenizer using Unsloth
            base_model_id = "unsloth/llama-3.2-11b-vision-instruct"
            model, tokenizer = FastVisionModel.from_pretrained(
                base_model_id,
                load_in_4bit=True,
            )

            # Load the adapter
            adapter_id = "saakshigupta/deepfake-explainer-1"
            model = PeftModel.from_pretrained(model, adapter_id)

            # Set to inference mode
            FastVisionModel.for_inference(model)
            
            return model, tokenizer
        except Exception as e:
            st.error(f"Error loading model: {str(e)}")
            return None, None

# Analyze image function
def analyze_image_with_llm(image, gradcam_overlay, face_box, pred_label, confidence, question, model, tokenizer, temperature=0.7, max_tokens=500, custom_instruction=""):
    # Create a prompt that includes GradCAM information
    if custom_instruction.strip():
        full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious.\n\n{custom_instruction}"
    else:
        full_prompt = f"{question}\n\nThe image has been processed with GradCAM and classified as {pred_label} with confidence {confidence:.2f}. Focus on the highlighted regions in red/yellow which show the areas the detection model found suspicious."
    
    # Format the message to include both the original image and the GradCAM visualization
    messages = [
        {"role": "user", "content": [
            {"type": "image", "image": image},  # Original image
            {"type": "image", "image": gradcam_overlay},  # GradCAM overlay
            {"type": "text", "text": full_prompt}
        ]}
    ]

    # Apply chat template
    input_text = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

    # Process with image
    inputs = tokenizer(
        [image, gradcam_overlay],  # Send both images
        input_text,
        add_special_tokens=False,
        return_tensors="pt",
    ).to(model.device)

    # Fix cross-attention mask if needed
    inputs = fix_cross_attention_mask(inputs)

    # Generate response
    with st.spinner("Generating detailed analysis... (this may take 15-30 seconds)"):
        with torch.no_grad():
            output_ids = model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                use_cache=True,
                temperature=temperature,
                top_p=0.9
            )

        # Decode the output
        response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        
        # Try to extract just the model's response (after the prompt)
        if full_prompt in response:
            result = response.split(full_prompt)[-1].strip()
        else:
            result = response
            
        return result

# Main app
def main():
    # Create placeholders for model state
    if 'clip_model_loaded' not in st.session_state:
        st.session_state.clip_model_loaded = False
        st.session_state.clip_model = None
        
    if 'llm_model_loaded' not in st.session_state:
        st.session_state.llm_model_loaded = False
        st.session_state.llm_model = None
        st.session_state.tokenizer = None
    
    # Create expanders for each stage
    with st.expander("Stage 1: Model Loading", expanded=True):
        # Button for loading CLIP model
        clip_col, llm_col = st.columns(2)
        
        with clip_col:
            if not st.session_state.clip_model_loaded:
                if st.button("πŸ“₯ Load CLIP Model for Detection", type="primary"):
                    # Load CLIP model
                    model = load_clip_model()
                    if model is not None:
                        st.session_state.clip_model = model
                        st.session_state.clip_model_loaded = True
                        st.success("βœ… CLIP model loaded successfully!")
                    else:
                        st.error("❌ Failed to load CLIP model.")
            else:
                st.success("βœ… CLIP model loaded and ready!")
        
        with llm_col:
            if not st.session_state.llm_model_loaded:
                if st.button("πŸ“₯ Load Vision LLM for Analysis", type="primary"):
                    # Load LLM model
                    model, tokenizer = load_llm_model()
                    if model is not None and tokenizer is not None:
                        st.session_state.llm_model = model
                        st.session_state.tokenizer = tokenizer
                        st.session_state.llm_model_loaded = True
                        st.success("βœ… Vision LLM loaded successfully!")
                    else:
                        st.error("❌ Failed to load Vision LLM.")
            else:
                st.success("βœ… Vision LLM loaded and ready!")
    
    # Image upload section
    with st.expander("Stage 2: Image Upload & Initial Detection", expanded=True):
        st.subheader("Upload an Image")
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
        
        if uploaded_file is not None:
            # Display the uploaded image
            image = Image.open(uploaded_file).convert("RGB")
            st.image(image, caption="Uploaded Image", use_column_width=True)
            
            # Detect with CLIP model if loaded
            if st.session_state.clip_model_loaded:
                with st.spinner("Analyzing image with CLIP model..."):
                    # Preprocess image for CLIP
                    transform = transforms.Compose([
                        transforms.Resize((224, 224)),
                        transforms.ToTensor(),
                        transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]),
                    ])
                    
                    # Create a simple dataset for the image
                    dataset = ImageDataset(image, transform=transform, face_only=True)
                    tensor, _, _, _, face_box, _ = dataset[0]
                    tensor = tensor.unsqueeze(0)
                    
                    # Get device
                    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
                    
                    # Move model and tensor to device
                    model = st.session_state.clip_model.to(device)
                    tensor = tensor.to(device)
                    
                    # Forward pass
                    with torch.no_grad():
                        outputs = model.vision_model(pixel_values=tensor).pooler_output
                        logits = model.classification_head(outputs)
                        probs = torch.softmax(logits, dim=1)[0]
                        pred_class = torch.argmax(probs).item()
                        confidence = probs[pred_class].item()
                        pred_label = "Fake" if pred_class == 1 else "Real"
                    
                    # Display results
                    result_col1, result_col2 = st.columns(2)
                    with result_col1:
                        st.metric("Prediction", pred_label)
                    with result_col2:
                        st.metric("Confidence", f"{confidence:.2%}")
                    
                    # GradCAM visualization
                    st.subheader("GradCAM Visualization")
                    cam, overlay, comparison, detected_face_box = process_image_with_gradcam(
                        image, model, device, pred_class
                    )
                    
                    # Display GradCAM results
                    st.image(comparison, caption="Original | CAM | Overlay", use_column_width=True)
                    
                    # Save results in session state for LLM analysis
                    st.session_state.current_image = image
                    st.session_state.current_overlay = overlay
                    st.session_state.current_face_box = detected_face_box
                    st.session_state.current_pred_label = pred_label
                    st.session_state.current_confidence = confidence
                    
                    st.success("βœ… Initial detection and GradCAM visualization complete!")
            else:
                st.warning("⚠️ Please load the CLIP model first to perform initial detection.")
    
    # LLM Analysis section
    with st.expander("Stage 3: Detailed Analysis with Vision LLM", expanded=False):
        if hasattr(st.session_state, 'current_image') and st.session_state.llm_model_loaded:
            st.subheader("Detailed Deepfake Analysis")
            
            # Default question with option to customize
            default_question = f"This image has been classified as {st.session_state.current_pred_label}. Analyze the key features that led to this classification, focusing on the highlighted areas in the GradCAM visualization. Provide both a technical explanation for experts and a simple explanation for non-technical users."
            question = st.text_area("Question/Prompt:", value=default_question, height=100)
            
            # Analyze button
            if st.button("πŸ” Perform Detailed Analysis", type="primary"):
                result = analyze_image_with_llm(
                    st.session_state.current_image,
                    st.session_state.current_overlay,
                    st.session_state.current_face_box,
                    st.session_state.current_pred_label,
                    st.session_state.current_confidence,
                    question,
                    st.session_state.llm_model,
                    st.session_state.tokenizer,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    custom_instruction=custom_instruction
                )
                
                # Display results
                st.success("βœ… Analysis complete!")
                
                # Check if the result contains both technical and non-technical explanations
                if "Technical" in result and "Non-Technical" in result:
                    # Split the result into technical and non-technical sections
                    parts = result.split("Non-Technical")
                    technical = parts[0]
                    non_technical = "Non-Technical" + parts[1]
                    
                    # Display in two columns
                    col1, col2 = st.columns(2)
                    with col1:
                        st.subheader("Technical Analysis")
                        st.markdown(technical)
                    
                    with col2:
                        st.subheader("Simple Explanation")
                        st.markdown(non_technical)
                else:
                    # Just display the whole result
                    st.subheader("Analysis Result")
                    st.markdown(result)
        elif not hasattr(st.session_state, 'current_image'):
            st.warning("⚠️ Please upload an image and complete the initial detection first.")
        else:
            st.warning("⚠️ Please load the Vision LLM to perform detailed analysis.")
    
    # Footer
    st.markdown("---")
    st.caption("Advanced Deepfake Image Analyzer")

if __name__ == "__main__":
    main()