abdull4h committed
Commit a94b356 · verified · 1 Parent(s): 0f16575

Update app.py

Files changed (1)
  1. app.py +758 -359
app.py CHANGED
@@ -11,24 +11,353 @@ import warnings
11
  # Suppress warnings for cleaner output
12
  warnings.filterwarnings("ignore")
13
 
14
- # Custom CSS for better styling
15
- custom_css = """
16
  .gradio-container {
17
- max-width: 1200px !important;
18
  }
19
  .gpt-oss-badge {
20
- background: linear-gradient(45deg, #00c6ff, #0072ff);
21
  color: white;
22
- padding: 5px 10px;
23
- border-radius: 15px;
24
  font-weight: bold;
25
  }
26
- .status-success {
27
- background: #d4edda;
28
- border: 1px solid #c3e6cb;
29
- color: #155724;
30
- padding: 10px;
31
- border-radius: 5px;
32
  }
33
  """
34
 
@@ -42,7 +371,6 @@ def initialize_gpt_oss_safe():
42
  """Initialize GPT-OSS-20B with multiple fallback strategies"""
43
  global model, tokenizer, model_status
44
 
45
- # Strategy 1: Try GPT-OSS-20B with specific settings
46
  strategies = [
47
  {
48
  "model_id": "openai/gpt-oss-20b",
@@ -65,16 +393,6 @@ def initialize_gpt_oss_safe():
65
  "low_cpu_mem_usage": True
66
  }
67
  },
68
- {
69
- "model_id": "openai/gpt-oss-20b",
70
- "name": "GPT-OSS-20B (FP16)",
71
- "config": {
72
- "torch_dtype": torch.float16,
73
- "device_map": "auto",
74
- "trust_remote_code": True,
75
- "low_cpu_mem_usage": True
76
- }
77
- },
78
  {
79
  "model_id": "microsoft/DialoGPT-large",
80
  "name": "DialoGPT-Large (Fallback)",
@@ -86,11 +404,6 @@ def initialize_gpt_oss_safe():
86
  ]
87
 
88
  device = "cuda" if torch.cuda.is_available() else "cpu"
89
- print(f"🔧 Device: {device}")
90
-
91
- if torch.cuda.is_available():
92
- print(f"🎮 GPU: {torch.cuda.get_device_name()}")
93
- print(f"💾 GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f}GB")
94
 
95
  for i, strategy in enumerate(strategies):
96
  try:
@@ -98,107 +411,54 @@ def initialize_gpt_oss_safe():
98
  config = strategy["config"]
99
  name = strategy["name"]
100
 
101
- print(f"\n🔄 Strategy {i+1}: Trying {name}")
102
- print(f"📦 Model: {model_id}")
103
 
104
- # Load tokenizer first
105
- print("🔤 Loading tokenizer...")
106
  tokenizer = AutoTokenizer.from_pretrained(
107
  model_id,
108
  trust_remote_code=True,
109
  use_fast=True
110
  )
111
 
112
- # Handle pad token for generation
113
  if tokenizer.pad_token is None:
114
  tokenizer.pad_token = tokenizer.eos_token
115
 
116
- print("✅ Tokenizer loaded successfully")
117
-
118
- # Load model with strategy-specific config
119
- print("🧠 Loading model...")
120
  model = AutoModelForCausalLM.from_pretrained(
121
  model_id,
122
  **config
123
  )
124
 
125
- print(f"📍 Model device: {next(model.parameters()).device}")
126
- print(f"🔢 Model dtype: {next(model.parameters()).dtype}")
127
-
128
- # Test generation to ensure everything works
129
- print("🧪 Testing generation...")
130
- test_messages = [{"role": "user", "content": "Hello, test."}]
131
-
132
- try:
133
- # Use pipeline for simpler testing
134
- test_pipe = pipeline(
135
- "text-generation",
136
- model=model,
137
- tokenizer=tokenizer,
138
- torch_dtype=config.get("torch_dtype", "auto"),
139
- device_map="auto" if torch.cuda.is_available() else None
140
- )
141
-
142
- test_result = test_pipe(
143
- test_messages,
144
- max_new_tokens=10,
145
- do_sample=False
146
- )
147
-
148
- print("✅ Generation test successful")
149
-
150
- except Exception as test_error:
151
- print(f"⚠️ Pipeline test failed, trying direct generation: {test_error}")
152
-
153
- # Fallback to direct generation
154
- inputs = tokenizer.apply_chat_template(
155
- test_messages,
156
- add_generation_prompt=True,
157
- return_tensors="pt",
158
- return_dict=True,
159
- )
160
-
161
- if torch.cuda.is_available():
162
- inputs = {k: v.to(model.device) for k, v in inputs.items()}
163
-
164
- with torch.no_grad():
165
- outputs = model.generate(
166
- **inputs,
167
- max_new_tokens=5,
168
- do_sample=False,
169
- pad_token_id=tokenizer.eos_token_id
170
- )
171
-
172
- print("✅ Direct generation test successful")
173
-
174
- # If we get here, the strategy worked
175
  model_status = f"✅ {name} loaded successfully on {device}"
176
- print(f"🎉 Success: {model_status}")
177
- return model_status
178
 
179
  except Exception as e:
180
- error_msg = str(e)
181
- print(f"❌ Strategy {i+1} failed: {error_msg[:100]}...")
182
-
183
- # Clear any partially loaded components
184
  model, tokenizer = None, None
185
-
186
- # Clear GPU memory if available
187
  if torch.cuda.is_available():
188
  torch.cuda.empty_cache()
189
-
190
  continue
191
 
192
- # If all strategies failed
193
- model_status = "❌ All model loading strategies failed. Using text-only fallback."
194
- print(model_status)
195
- return model_status
196
 
197
- # Enhanced attack scenarios (keeping the same as before)
198
  ATTACK_SCENARIOS = {
199
  "🔄 Lateral Movement": {
200
  "description": "Advanced Persistent Threat (APT) - Attacker moving laterally through network after initial compromise",
201
  "severity": "Critical",
202
  "alerts": [
203
  {
204
  "id": "ALR-001",
@@ -247,6 +507,8 @@ ATTACK_SCENARIOS = {
247
  "📧 Phishing Campaign": {
248
  "description": "Email-based social engineering attack leading to credential theft and data exfiltration",
249
  "severity": "High",
250
  "alerts": [
251
  {
252
  "id": "ALR-004",
@@ -281,6 +543,8 @@ ATTACK_SCENARIOS = {
281
  "🔒 Ransomware Attack": {
282
  "description": "File encryption attack with ransom demand - likely REvil/Sodinokibi variant",
283
  "severity": "Critical",
284
  "alerts": [
285
  {
286
  "id": "ALR-006",
@@ -314,12 +578,31 @@ ATTACK_SCENARIOS = {
314
  }
315
  }
316
 
317
  @spaces.GPU
318
  def generate_analysis_safe(alert_data, analyst_level):
319
  """Generate analysis with safe error handling"""
320
 
321
  if not model or not tokenizer:
322
- return get_fallback_analysis(alert_data, analyst_level)
323
 
324
  security_prompts = {
325
  "L1": f"""As a Level 1 SOC analyst, provide immediate triage for this security alert:
@@ -358,163 +641,180 @@ Provide: strategic assessment, attribution analysis, response plan."""
358
  try:
359
  prompt = security_prompts.get(analyst_level, security_prompts["L2"])
360
 
361
- # Try pipeline approach first (safer)
362
- try:
363
- pipe = pipeline(
364
- "text-generation",
365
- model=model,
366
- tokenizer=tokenizer,
367
- torch_dtype="auto",
368
- device_map="auto" if torch.cuda.is_available() else None
369
- )
370
-
371
- messages = [{"role": "user", "content": prompt}]
372
-
373
- result = pipe(
374
- messages,
375
- max_new_tokens=400,
376
- do_sample=True,
377
- temperature=0.3,
378
- top_p=0.9,
379
- repetition_penalty=1.1,
380
- pad_token_id=tokenizer.eos_token_id
381
- )
382
-
383
- analysis = result[0]["generated_text"][-1]["content"]
384
-
385
- except Exception as pipe_error:
386
- print(f"Pipeline failed, trying direct generation: {pipe_error}")
387
-
388
- # Fallback to direct generation
389
- messages = [{"role": "user", "content": prompt}]
390
-
391
- inputs = tokenizer.apply_chat_template(
392
- messages,
393
- add_generation_prompt=True,
394
- return_tensors="pt",
395
- return_dict=True,
396
- )
397
-
398
- if torch.cuda.is_available():
399
- inputs = {k: v.to(model.device) for k, v in inputs.items()}
400
-
401
- with torch.no_grad():
402
- outputs = model.generate(
403
- **inputs,
404
- max_new_tokens=400,
405
- do_sample=True,
406
- temperature=0.3,
407
- top_p=0.9,
408
- repetition_penalty=1.1,
409
- pad_token_id=tokenizer.eos_token_id,
410
- eos_token_id=tokenizer.eos_token_id
411
- )
412
-
413
- input_length = inputs["input_ids"].shape[-1]
414
- generated_tokens = outputs[0][input_length:]
415
- analysis = tokenizer.decode(generated_tokens, skip_special_tokens=True)
416
 
417
- # Quality check
418
- if len(analysis.strip()) < 50:
419
- return get_fallback_analysis(alert_data, analyst_level)
420
 
421
- # Determine model name for display
422
- if "gpt-oss" in model_status.lower():
423
- badge = "🤖 OpenAI GPT-OSS-20B Analysis"
424
- else:
425
- badge = "🤖 AI-Powered Analysis"
426
 
427
- return f"""{badge}
428
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
429
-
430
- {analysis.strip()}
431
-
432
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
433
- *Generated using advanced AI reasoning capabilities*"""
434
 
435
  except Exception as e:
436
  print(f"Generation error: {e}")
437
- return f"⚠️ AI Error: {str(e)[:100]}\n\n{get_fallback_analysis(alert_data, analyst_level)}"
438
 
439
- def get_fallback_analysis(alert_data, analyst_level):
440
- """High-quality fallback analysis"""
441
 
442
  templates = {
443
- "L1": f"""🚨 **L1 SOC TRIAGE ANALYSIS**
444
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
445
-
446
- **🎯 THREAT ASSESSMENT:**
447
- Alert: {alert_data['alert_type']} | Severity: {alert_data['severity']}
448
- Confidence: {alert_data['confidence']}% | Source: {alert_data['source_ip']}
449
-
450
- **⚡ IMMEDIATE ACTIONS:**
451
- 1. Isolate affected system: {alert_data['source_ip']}
452
- 2. Disable user account: {alert_data['user']}
453
- 3. Block connections to: {alert_data['destination_ip']}
454
- 4. Preserve evidence and logs
455
-
456
- **⬆️ ESCALATION DECISION:**
457
- Severity: {alert_data['severity']} → ESCALATE TO L2
458
- Technique: {alert_data['mitre_tactic']} requires deeper analysis
459
-
460
- **📝 INITIAL NOTES:**
461
- {alert_data['threat_intel']}""",
462
 
463
- "L2": f"""🔍 **L2 INVESTIGATION ANALYSIS**
464
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
465
 
466
- **🎯 ATTACK VECTOR:**
467
- Technique: {alert_data['mitre_tactic']}
468
- Evidence: {alert_data['raw_log']}
469
- Context: {alert_data['description']}
470
-
471
- **🔬 INVESTIGATION ROADMAP:**
472
- 1. Timeline correlation: ±30min window analysis
473
- 2. User behavior baseline: {alert_data['user']} comparison
474
- 3. Network flow analysis: {alert_data['source_ip']} → {alert_data['destination_ip']}
475
- 4. Process tree examination and artifact collection
476
- 5. Similar IOC hunting across environment
477
-
478
- **📊 THREAT ASSESSMENT:**
479
- Confidence Level: {alert_data['confidence']}%
480
- Business Impact: {alert_data['severity']}
481
- Attribution Context: {alert_data['threat_intel']}
482
-
483
- **🎯 NEXT STEPS:**
484
- Deploy hunting queries for similar TTPs
485
- Review authentication logs for compromise indicators
486
- Consider L3 escalation if campaign evidence found""",
487
-
488
- "L3": f"""🎯 **L3 EXPERT STRATEGIC ANALYSIS**
489
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
490
-
491
- **🎭 ADVERSARY ASSESSMENT:**
492
- Sophistication: Advanced (based on {alert_data['mitre_tactic']})
493
- Campaign Context: {alert_data['threat_intel']}
494
- Success Probability: {alert_data['confidence']}%
495
-
496
- **💼 BUSINESS IMPACT:**
497
- Severity Level: {alert_data['severity']}
498
- Executive Notification: Required for Critical/High
499
- Regulatory Implications: Under compliance review
500
-
501
- **🛡️ STRATEGIC RESPONSE:**
502
- Immediate: Threat hunting deployment across infrastructure
503
- Short-term: Enhanced monitoring and detection rule updates
504
- Medium-term: Security architecture review and gap analysis
505
- Long-term: Threat intelligence integration and training
506
-
507
- **📈 EXECUTIVE BRIEFING POINTS:**
508
- • Sophisticated attack requiring coordinated incident response
509
- • High potential for lateral movement and data exfiltration
510
- • Recommend immediate incident response team activation
511
- • Consider external forensics engagement for complex analysis"""
512
  }
513
 
514
  return templates.get(analyst_level, templates["L2"])
515
 
516
- def analyze_alert_with_ai(scenario_name, alert_index, analyst_level):
517
- """Main analysis function with error handling"""
518
  start_time = time.time()
519
 
520
  if scenario_name not in ATTACK_SCENARIOS:
@@ -531,181 +831,280 @@ def analyze_alert_with_ai(scenario_name, alert_index, analyst_level):
531
  # Generate analysis
532
  analysis = generate_analysis_safe(selected_alert, analyst_level)
533
 
534
- # Format alert details
535
- alert_details = f"""🎫 **ALERT {selected_alert['id']}** | 🕐 {selected_alert['timestamp']}
536
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
537
-
538
- 🌐 **NETWORK FLOW:**
539
- Source: {selected_alert['source_ip']} → Destination: {selected_alert['destination_ip']}
540
-
541
- 👤 **USER CONTEXT:**
542
- Account: {selected_alert['user']}
543
-
544
- ⚠️ **CLASSIFICATION:**
545
- Type: {selected_alert['alert_type']}
546
- Severity: {selected_alert['severity']}
547
- Confidence: {selected_alert['confidence']}%
548
-
549
- 📝 **DESCRIPTION:**
550
- {selected_alert['description']}
551
-
552
- 🔍 **TECHNICAL EVIDENCE:**
553
- {selected_alert['raw_log']}
554
-
555
- 🧠 **THREAT INTELLIGENCE:**
556
- {selected_alert['threat_intel']}
557
-
558
- 🎪 **MITRE ATT&CK:**
559
- {selected_alert['mitre_tactic']}
560
-
561
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"""
562
 
563
  processing_time = round(time.time() - start_time, 2)
564
  device_info = "GPU" if torch.cuda.is_available() else "CPU"
565
- status = f"✅ {analyst_level} analysis completed in {processing_time}s | Device: {device_info} | {model_status}"
566
 
567
  return alert_details, analysis, status
568
 
569
- def get_scenario_info(scenario_name):
570
- """Get scenario information"""
571
  if scenario_name in ATTACK_SCENARIOS:
572
  scenario = ATTACK_SCENARIOS[scenario_name]
573
 
574
- info = f"""## 🎭 **Attack Scenario: {scenario_name}**
575
-
576
- **📋 Description:** {scenario['description']}
577
- **⚠️ Severity:** {scenario['severity']}
578
- **📊 Total Alerts:** {len(scenario['alerts'])} security events
579
-
580
- ### 🔍 **Alert Timeline:**
581
- """
582
 
583
  for i, alert in enumerate(scenario['alerts']):
584
- info += f"""**[{i+1}] {alert['timestamp']}** - {alert['alert_type']}
585
- └─ Severity: {alert['severity']} | Confidence: {alert['confidence']}%
586
- """
587
 
588
  info += """
589
- ### 🤖 **AI Analysis Features:**
590
- - **Multi-Strategy Loading:** Automatic fallback to compatible models
591
- - **Advanced Reasoning:** Chain-of-thought analysis for complex threats
592
- - **Error Recovery:** Robust handling of model loading issues
593
- - **Quality Assurance:** Automatic fallback to expert templates"""
594
 
595
  return info
596
- return "⚠️ No scenario selected."
597
 
598
- # Create Gradio interface
599
- with gr.Blocks(title="SOC Assistant - Fixed GPT-OSS", theme=gr.themes.Soft(), css=custom_css) as demo:
600
-
601
- gr.Markdown("""
602
- # 🛡️ SOC LLM Assistant - Fixed GPT-OSS Edition
603
- **Multi-Strategy Model Loading with Robust Error Handling**
604
 
605
- *Automatically tries GPT-OSS-20B first, then falls back to compatible models*
606
  """)
607
 
608
  # Model status display
609
- status_display = gr.Markdown("🔄 Initializing AI models with multiple strategies...")
610
 
611
  with gr.Row():
612
- # Left Panel
613
- with gr.Column(scale=1, min_width=300):
614
- gr.Markdown("## 🎮 Attack Simulation")
615
 
616
  scenario_dropdown = gr.Dropdown(
617
  choices=list(ATTACK_SCENARIOS.keys()),
618
  label="🎭 Select Attack Scenario",
619
  value="🔄 Lateral Movement",
620
- interactive=True
621
  )
622
 
623
- scenario_info = gr.Markdown()
624
 
625
- gr.Markdown("---")
626
- gr.Markdown("## ⚙️ Analysis Configuration")
627
 
628
  alert_slider = gr.Slider(
629
  minimum=0,
630
  maximum=2,
631
  step=1,
632
  value=0,
633
- label="📋 Alert Selection"
634
  )
635
 
636
  analyst_level = gr.Radio(
637
  choices=["L1", "L2", "L3"],
638
- label="👤 Analyst Level",
639
  value="L2",
640
- info="L1: Triage | L2: Investigation | L3: Expert"
641
  )
642
 
643
  analyze_btn = gr.Button(
644
- "🚀 Analyze with AI",
645
  variant="primary",
646
- size="lg"
647
  )
648
 
649
  init_btn = gr.Button(
650
- "🔄 Retry Model Loading",
651
- variant="secondary"
652
  )
653
 
654
- gr.Markdown("---")
655
- gr.Markdown("## 🔧 Loading Strategies")
656
- gr.Markdown("""
657
- **🎯 Automatic Fallback:**
658
- 1. GPT-OSS-20B (Original MXFP4)
659
- 2. GPT-OSS-20B (BF16)
660
- 3. GPT-OSS-20B (FP16)
661
- 4. DialoGPT-Large (Backup)
662
-
663
- **✅ Robust Error Handling**
664
  """)
665
 
666
- # Right Panel
667
  with gr.Column(scale=2):
668
- gr.Markdown("## 📋 Security Alert Details")
669
- alert_output = gr.Textbox(
670
- label="🎫 Alert Information",
671
- lines=15,
672
- interactive=False
673
  )
674
 
675
- gr.Markdown("## 🤖 AI-Powered Analysis")
676
- analysis_output = gr.Textbox(
677
- label="🧠 Security Analysis",
678
- lines=25,
679
- interactive=False
680
  )
681
 
682
- status_output = gr.Textbox(
683
- label="📊 Processing Status",
684
- lines=1,
685
- interactive=False
686
- )
687
-
688
- gr.Markdown("""
689
- ---
690
- ## 🔧 **Troubleshooting Guide**
691
-
692
- **If you see "ModelWrapper" error:**
693
- - ✅ **Fixed:** This version uses multiple loading strategies
694
- - 🔄 **Automatic:** Falls back to compatible models
695
- - 🛠️ **Manual:** Use "Retry Model Loading" button
696
-
697
- **Loading Strategy Order:**
698
- 1. **GPT-OSS-20B** - Latest OpenAI open-weight model
699
- 2. **Fallback Models** - Tested compatible alternatives
700
- 3. **Expert Templates** - High-quality manual analysis
701
 
702
- ---
703
- **👨‍🎓 Research:** Abdullah Alanazi | **🏛️ KAUST** | **👨‍🏫 Prof. Ali Shoker**
704
  """)
705
 
706
  # Event handlers
707
  scenario_dropdown.change(
708
- fn=get_scenario_info,
709
  inputs=[scenario_dropdown],
710
  outputs=[scenario_info]
711
  )
@@ -723,7 +1122,7 @@ with gr.Blocks(title="SOC Assistant - Fixed GPT-OSS", theme=gr.themes.Soft(), cs
723
  )
724
 
725
  analyze_btn.click(
726
- fn=analyze_alert_with_ai,
727
  inputs=[scenario_dropdown, alert_slider, analyst_level],
728
  outputs=[alert_output, analysis_output, status_output]
729
  )
@@ -735,7 +1134,7 @@ with gr.Blocks(title="SOC Assistant - Fixed GPT-OSS", theme=gr.themes.Soft(), cs
735
 
736
  # Initialize on startup
737
  demo.load(
738
- fn=get_scenario_info,
739
  inputs=[scenario_dropdown],
740
  outputs=[scenario_info]
741
  )
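
The heart of this commit is the slimmed-down multi-strategy loader that appears on the new side of the diff below. For reference, here is a minimal, self-contained sketch of that pattern; the strategy entries and the helper name are illustrative, not the exact contents of app.py:

```python
# Sketch of the try-each-strategy loading loop used by initialize_gpt_oss_safe().
# Assumes the libraries app.py already imports; the strategy list is abbreviated.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

STRATEGIES = [
    {"model_id": "openai/gpt-oss-20b", "name": "GPT-OSS-20B",
     "config": {"torch_dtype": "auto", "device_map": "auto",
                "trust_remote_code": True, "low_cpu_mem_usage": True}},
    {"model_id": "microsoft/DialoGPT-large", "name": "DialoGPT-Large (Fallback)",
     "config": {"torch_dtype": torch.float16, "device_map": "auto"}},
]

def load_first_working_model(strategies=STRATEGIES):
    """Return (model, tokenizer, status) for the first strategy that loads and generates."""
    for strategy in strategies:
        model, tokenizer = None, None
        try:
            tokenizer = AutoTokenizer.from_pretrained(
                strategy["model_id"], trust_remote_code=True, use_fast=True
            )
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token  # generation needs a pad token
            model = AutoModelForCausalLM.from_pretrained(strategy["model_id"], **strategy["config"])

            # Smoke-test generation before declaring success, mirroring the app's check.
            pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
            pipe([{"role": "user", "content": "Test"}], max_new_tokens=5, do_sample=False)
            return model, tokenizer, f"✅ {strategy['name']} loaded"
        except Exception:
            # Drop partially loaded objects and free GPU memory before the next attempt.
            model, tokenizer = None, None
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
    return None, None, "⚠️ Using fallback analysis mode"
```

The new side of the diff, listed below with its updated line numbers, follows this shape and additionally wraps the returned status in styled HTML for the Gradio status panel.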
 
11
  # Suppress warnings for cleaner output
12
  warnings.filterwarnings("ignore")
13
 
14
+ # Enhanced CSS for beautiful design
15
+ beautiful_css = """
16
+ /* Import Google Fonts */
17
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
18
+
19
+ /* Global Styles */
20
  .gradio-container {
21
+ max-width: 1400px !important;
22
+ margin: 0 auto !important;
23
+ font-family: 'Inter', sans-serif !important;
24
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
25
+ min-height: 100vh;
26
+ }
27
+
28
+ /* Header Styling */
29
+ .header-container {
30
+ background: rgba(255, 255, 255, 0.95);
31
+ backdrop-filter: blur(10px);
32
+ border-radius: 20px;
33
+ padding: 2rem;
34
+ margin: 1rem;
35
+ box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1);
36
+ border: 1px solid rgba(255, 255, 255, 0.2);
37
+ }
38
+
39
+ /* Main Content Cards */
40
+ .content-card {
41
+ background: rgba(255, 255, 255, 0.98);
42
+ backdrop-filter: blur(15px);
43
+ border-radius: 16px;
44
+ padding: 1.5rem;
45
+ margin: 0.5rem;
46
+ box-shadow: 0 10px 30px rgba(0, 0, 0, 0.08);
47
+ border: 1px solid rgba(255, 255, 255, 0.3);
48
+ transition: all 0.3s ease;
49
+ }
50
+
51
+ .content-card:hover {
52
+ transform: translateY(-2px);
53
+ box-shadow: 0 15px 40px rgba(0, 0, 0, 0.12);
54
+ }
55
+
56
+ /* Status Indicators */
57
+ .status-success {
58
+ background: linear-gradient(135deg, #4CAF50, #45a049);
59
+ color: white;
60
+ padding: 12px 20px;
61
+ border-radius: 12px;
62
+ font-weight: 500;
63
+ box-shadow: 0 4px 15px rgba(76, 175, 80, 0.3);
64
+ border: none;
65
  }
66
+
67
+ .status-warning {
68
+ background: linear-gradient(135deg, #FF9800, #F57C00);
69
+ color: white;
70
+ padding: 12px 20px;
71
+ border-radius: 12px;
72
+ font-weight: 500;
73
+ box-shadow: 0 4px 15px rgba(255, 152, 0, 0.3);
74
+ }
75
+
76
+ .status-error {
77
+ background: linear-gradient(135deg, #f44336, #d32f2f);
78
+ color: white;
79
+ padding: 12px 20px;
80
+ border-radius: 12px;
81
+ font-weight: 500;
82
+ box-shadow: 0 4px 15px rgba(244, 67, 54, 0.3);
83
+ }
84
+
85
+ /* GPT-OSS Badge */
86
  .gpt-oss-badge {
87
+ background: linear-gradient(135deg, #667eea, #764ba2);
88
+ color: white;
89
+ padding: 8px 16px;
90
+ border-radius: 20px;
91
+ font-weight: 600;
92
+ font-size: 0.9rem;
93
+ display: inline-block;
94
+ box-shadow: 0 4px 15px rgba(102, 126, 234, 0.3);
95
+ margin: 0.5rem 0;
96
+ }
97
+
98
+ /* Alert Severity Badges */
99
+ .severity-critical {
100
+ background: linear-gradient(135deg, #dc3545, #c82333);
101
+ color: white;
102
+ padding: 4px 12px;
103
+ border-radius: 20px;
104
+ font-weight: 600;
105
+ font-size: 0.8rem;
106
+ display: inline-block;
107
+ box-shadow: 0 2px 8px rgba(220, 53, 69, 0.3);
108
+ }
109
+
110
+ .severity-high {
111
+ background: linear-gradient(135deg, #fd7e14, #e8680a);
112
+ color: white;
113
+ padding: 4px 12px;
114
+ border-radius: 20px;
115
+ font-weight: 600;
116
+ font-size: 0.8rem;
117
+ display: inline-block;
118
+ box-shadow: 0 2px 8px rgba(253, 126, 20, 0.3);
119
+ }
120
+
121
+ .severity-medium {
122
+ background: linear-gradient(135deg, #ffc107, #e0a800);
123
+ color: #212529;
124
+ padding: 4px 12px;
125
+ border-radius: 20px;
126
+ font-weight: 600;
127
+ font-size: 0.8rem;
128
+ display: inline-block;
129
+ box-shadow: 0 2px 8px rgba(255, 193, 7, 0.3);
130
+ }
131
+
132
+ /* Button Styling */
133
+ .primary-button {
134
+ background: linear-gradient(135deg, #667eea, #764ba2) !important;
135
+ border: none !important;
136
+ border-radius: 12px !important;
137
+ padding: 12px 24px !important;
138
+ font-weight: 600 !important;
139
+ font-size: 1rem !important;
140
+ transition: all 0.3s ease !important;
141
+ box-shadow: 0 6px 20px rgba(102, 126, 234, 0.4) !important;
142
+ }
143
+
144
+ .primary-button:hover {
145
+ transform: translateY(-2px) !important;
146
+ box-shadow: 0 8px 25px rgba(102, 126, 234, 0.5) !important;
147
+ }
148
+
149
+ .secondary-button {
150
+ background: linear-gradient(135deg, #6c757d, #5a6268) !important;
151
+ border: none !important;
152
+ border-radius: 12px !important;
153
+ padding: 10px 20px !important;
154
+ font-weight: 500 !important;
155
+ color: white !important;
156
+ transition: all 0.3s ease !important;
157
+ }
158
+
159
+ /* Input Styling */
160
+ .custom-input {
161
+ border-radius: 12px !important;
162
+ border: 2px solid #e9ecef !important;
163
+ padding: 12px !important;
164
+ transition: all 0.3s ease !important;
165
+ background: rgba(255, 255, 255, 0.9) !important;
166
+ }
167
+
168
+ .custom-input:focus {
169
+ border-color: #667eea !important;
170
+ box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1) !important;
171
+ }
172
+
173
+ /* Section Headers */
174
+ .section-header {
175
+ font-size: 1.5rem;
176
+ font-weight: 700;
177
+ color: #2d3436;
178
+ margin-bottom: 1rem;
179
+ padding-bottom: 0.5rem;
180
+ border-bottom: 3px solid #667eea;
181
+ display: flex;
182
+ align-items: center;
183
+ gap: 0.5rem;
184
+ }
185
+
186
+ /* Alert Timeline */
187
+ .timeline-item {
188
+ background: rgba(102, 126, 234, 0.05);
189
+ border-left: 4px solid #667eea;
190
+ padding: 1rem;
191
+ margin: 0.5rem 0;
192
+ border-radius: 0 8px 8px 0;
193
+ transition: all 0.3s ease;
194
+ }
195
+
196
+ .timeline-item:hover {
197
+ background: rgba(102, 126, 234, 0.1);
198
+ transform: translateX(4px);
199
+ }
200
+
201
+ /* Analysis Output Styling */
202
+ .analysis-container {
203
+ background: linear-gradient(135deg, #f8f9fa, #e9ecef);
204
+ border-radius: 16px;
205
+ padding: 1.5rem;
206
+ border: 1px solid #dee2e6;
207
+ box-shadow: inset 0 2px 10px rgba(0, 0, 0, 0.05);
208
+ }
209
+
210
+ /* Confidence Meter */
211
+ .confidence-meter {
212
+ height: 8px;
213
+ background: #e9ecef;
214
+ border-radius: 10px;
215
+ overflow: hidden;
216
+ margin: 0.5rem 0;
217
+ }
218
+
219
+ .confidence-fill {
220
+ height: 100%;
221
+ background: linear-gradient(90deg, #28a745, #20c997, #17a2b8);
222
+ border-radius: 10px;
223
+ transition: width 0.5s ease;
224
+ }
225
+
226
+ /* Responsive Design */
227
+ @media (max-width: 768px) {
228
+ .gradio-container {
229
+ padding: 0.5rem;
230
+ }
231
+
232
+ .content-card {
233
+ margin: 0.25rem;
234
+ padding: 1rem;
235
+ }
236
+
237
+ .section-header {
238
+ font-size: 1.25rem;
239
+ }
240
+ }
241
+
242
+ /* Loading Animation */
243
+ .loading-spinner {
244
+ border: 3px solid #f3f3f3;
245
+ border-top: 3px solid #667eea;
246
+ border-radius: 50%;
247
+ width: 20px;
248
+ height: 20px;
249
+ animation: spin 1s linear infinite;
250
+ display: inline-block;
251
+ margin-right: 0.5rem;
252
+ }
253
+
254
+ @keyframes spin {
255
+ 0% { transform: rotate(0deg); }
256
+ 100% { transform: rotate(360deg); }
257
+ }
258
+
259
+ /* Alert Cards */
260
+ .alert-card {
261
+ background: white;
262
+ border-radius: 12px;
263
+ padding: 1.5rem;
264
+ margin: 0.5rem 0;
265
+ box-shadow: 0 4px 12px rgba(0, 0, 0, 0.08);
266
+ border-left: 5px solid #667eea;
267
+ transition: all 0.3s ease;
268
+ }
269
+
270
+ .alert-card:hover {
271
+ transform: translateY(-2px);
272
+ box-shadow: 0 6px 20px rgba(0, 0, 0, 0.12);
273
+ }
274
+
275
+ /* Network Flow Visualization */
276
+ .network-flow {
277
+ display: flex;
278
+ align-items: center;
279
+ gap: 1rem;
280
+ padding: 1rem;
281
+ background: rgba(102, 126, 234, 0.05);
282
+ border-radius: 12px;
283
+ margin: 0.5rem 0;
284
+ }
285
+
286
+ .network-node {
287
+ background: #667eea;
288
  color: white;
289
+ padding: 0.5rem 1rem;
290
+ border-radius: 8px;
291
+ font-weight: 500;
292
+ font-size: 0.9rem;
293
+ }
294
+
295
+ .network-arrow {
296
+ color: #667eea;
297
+ font-size: 1.5rem;
298
  font-weight: bold;
299
  }
300
+
301
+ /* MITRE ATT&CK Styling */
302
+ .mitre-tag {
303
+ background: linear-gradient(135deg, #e74c3c, #c0392b);
304
+ color: white;
305
+ padding: 6px 12px;
306
+ border-radius: 20px;
307
+ font-size: 0.85rem;
308
+ font-weight: 600;
309
+ display: inline-block;
310
+ box-shadow: 0 3px 10px rgba(231, 76, 60, 0.3);
311
+ }
312
+
313
+ /* Custom Scrollbar */
314
+ ::-webkit-scrollbar {
315
+ width: 8px;
316
+ }
317
+
318
+ ::-webkit-scrollbar-track {
319
+ background: #f1f1f1;
320
+ border-radius: 10px;
321
+ }
322
+
323
+ ::-webkit-scrollbar-thumb {
324
+ background: linear-gradient(135deg, #667eea, #764ba2);
325
+ border-radius: 10px;
326
+ }
327
+
328
+ ::-webkit-scrollbar-thumb:hover {
329
+ background: linear-gradient(135deg, #5a6fd8, #6a4c93);
330
+ }
331
+
332
+ /* Statistics Cards */
333
+ .stat-card {
334
+ background: white;
335
+ border-radius: 12px;
336
+ padding: 1.5rem;
337
+ text-align: center;
338
+ box-shadow: 0 4px 12px rgba(0, 0, 0, 0.08);
339
+ transition: all 0.3s ease;
340
+ border-top: 4px solid #667eea;
341
+ }
342
+
343
+ .stat-card:hover {
344
+ transform: translateY(-3px);
345
+ box-shadow: 0 8px 25px rgba(0, 0, 0, 0.12);
346
+ }
347
+
348
+ .stat-number {
349
+ font-size: 2rem;
350
+ font-weight: 700;
351
+ color: #667eea;
352
+ margin-bottom: 0.5rem;
353
+ }
354
+
355
+ .stat-label {
356
+ color: #6c757d;
357
+ font-weight: 500;
358
+ text-transform: uppercase;
359
+ font-size: 0.85rem;
360
+ letter-spacing: 0.5px;
361
  }
362
  """
363
 
 
371
  """Initialize GPT-OSS-20B with multiple fallback strategies"""
372
  global model, tokenizer, model_status
373
 
 
374
  strategies = [
375
  {
376
  "model_id": "openai/gpt-oss-20b",
 
393
  "low_cpu_mem_usage": True
394
  }
395
  },
 
 
396
  {
397
  "model_id": "microsoft/DialoGPT-large",
398
  "name": "DialoGPT-Large (Fallback)",
 
404
  ]
405
 
406
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
407
 
408
  for i, strategy in enumerate(strategies):
409
  try:
 
411
  config = strategy["config"]
412
  name = strategy["name"]
413
 
414
+ print(f"🔄 Trying {name}...")
 
415
 
 
 
416
  tokenizer = AutoTokenizer.from_pretrained(
417
  model_id,
418
  trust_remote_code=True,
419
  use_fast=True
420
  )
421
 
 
422
  if tokenizer.pad_token is None:
423
  tokenizer.pad_token = tokenizer.eos_token
424
 
 
 
 
 
425
  model = AutoModelForCausalLM.from_pretrained(
426
  model_id,
427
  **config
428
  )
429
 
430
+ # Test generation
431
+ test_messages = [{"role": "user", "content": "Test"}]
432
+ test_pipe = pipeline(
433
+ "text-generation",
434
+ model=model,
435
+ tokenizer=tokenizer,
436
+ torch_dtype=config.get("torch_dtype", "auto"),
437
+ device_map="auto" if torch.cuda.is_available() else None
438
+ )
439
+
440
+ test_pipe(test_messages, max_new_tokens=5, do_sample=False)
441
+
 
 
 
 
 
 
 
 
 
 
 
 
 
442
  model_status = f"✅ {name} loaded successfully on {device}"
443
+ return f'<div class="status-success">🎉 {model_status}</div>'
 
444
 
445
  except Exception as e:
446
+ print(f"❌ Strategy {i+1} failed: {str(e)[:100]}")
 
 
 
447
  model, tokenizer = None, None
 
 
448
  if torch.cuda.is_available():
449
  torch.cuda.empty_cache()
 
450
  continue
451
 
452
+ model_status = "⚠️ Using fallback analysis mode"
453
+ return f'<div class="status-warning">{model_status}</div>'
 
 
454
 
455
+ # Enhanced attack scenarios
456
  ATTACK_SCENARIOS = {
457
  "🔄 Lateral Movement": {
458
  "description": "Advanced Persistent Threat (APT) - Attacker moving laterally through network after initial compromise",
459
  "severity": "Critical",
460
+ "icon": "🔄",
461
+ "color": "#dc3545",
462
  "alerts": [
463
  {
464
  "id": "ALR-001",
 
507
  "📧 Phishing Campaign": {
508
  "description": "Email-based social engineering attack leading to credential theft and data exfiltration",
509
  "severity": "High",
510
+ "icon": "📧",
511
+ "color": "#fd7e14",
512
  "alerts": [
513
  {
514
  "id": "ALR-004",
 
543
  "🔒 Ransomware Attack": {
544
  "description": "File encryption attack with ransom demand - likely REvil/Sodinokibi variant",
545
  "severity": "Critical",
546
+ "icon": "🔒",
547
+ "color": "#dc3545",
548
  "alerts": [
549
  {
550
  "id": "ALR-006",
 
578
  }
579
  }
580
 
581
+ def get_severity_class(severity):
582
+ """Get CSS class for severity level"""
583
+ classes = {
584
+ "Critical": "severity-critical",
585
+ "High": "severity-high",
586
+ "Medium": "severity-medium",
587
+ "Low": "severity-low"
588
+ }
589
+ return classes.get(severity, "severity-medium")
590
+
591
+ def create_confidence_meter(confidence):
592
+ """Create a visual confidence meter"""
593
+ return f"""
594
+ <div class="confidence-meter">
595
+ <div class="confidence-fill" style="width: {confidence}%"></div>
596
+ </div>
597
+ <small style="color: #6c757d;">{confidence}% Confidence</small>
598
+ """
599
+
600
  @spaces.GPU
601
  def generate_analysis_safe(alert_data, analyst_level):
602
  """Generate analysis with safe error handling"""
603
 
604
  if not model or not tokenizer:
605
+ return get_beautiful_fallback(alert_data, analyst_level)
606
 
607
  security_prompts = {
608
  "L1": f"""As a Level 1 SOC analyst, provide immediate triage for this security alert:
 
641
  try:
642
  prompt = security_prompts.get(analyst_level, security_prompts["L2"])
643
 
644
+ pipe = pipeline(
645
+ "text-generation",
646
+ model=model,
647
+ tokenizer=tokenizer,
648
+ torch_dtype="auto",
649
+ device_map="auto" if torch.cuda.is_available() else None
650
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
651
 
652
+ messages = [{"role": "user", "content": prompt}]
 
 
653
 
654
+ result = pipe(
655
+ messages,
656
+ max_new_tokens=400,
657
+ do_sample=True,
658
+ temperature=0.3,
659
+ top_p=0.9,
660
+ repetition_penalty=1.1,
661
+ pad_token_id=tokenizer.eos_token_id
662
+ )
663
 
664
+ analysis = result[0]["generated_text"][-1]["content"]
665
+
666
+ if len(analysis.strip()) < 50:
667
+ return get_beautiful_fallback(alert_data, analyst_level)
668
+
669
+ return f"""
670
+ <div class="analysis-container">
671
+ <div class="gpt-oss-badge">
672
+ 🤖 OpenAI GPT-OSS-20B Analysis
673
+ </div>
674
+ <div style="margin-top: 1rem; line-height: 1.6;">
675
+ {analysis.strip()}
676
+ </div>
677
+ <div style="margin-top: 1rem; padding-top: 1rem; border-top: 1px solid #dee2e6; color: #6c757d; font-size: 0.9rem;">
678
+ ⚡ Generated using GPT-OSS-20B • 21B parameters • 3.6B active per token
679
+ </div>
680
+ </div>
681
+ """
682
 
683
  except Exception as e:
684
  print(f"Generation error: {e}")
685
+ return get_beautiful_fallback(alert_data, analyst_level)
686
 
687
+ def get_beautiful_fallback(alert_data, analyst_level):
688
+ """Beautiful fallback analysis with enhanced styling"""
689
+
690
+ severity_class = get_severity_class(alert_data['severity'])
691
+ confidence_meter = create_confidence_meter(alert_data['confidence'])
692
 
693
  templates = {
694
+ "L1": f"""
695
+ <div class="alert-card">
696
+ <div class="section-header">
697
+ 🚨 L1 SOC Triage Analysis
698
+ </div>
699
+
700
+ <div style="margin: 1rem 0;">
701
+ <span class="{severity_class}">{alert_data['severity']} Severity</span>
702
+ <span class="mitre-tag" style="margin-left: 0.5rem;">{alert_data['mitre_tactic']}</span>
703
+ </div>
704
+
705
+ <div class="network-flow">
706
+ <div class="network-node">{alert_data['source_ip']}</div>
707
+ <div class="network-arrow">→</div>
708
+ <div class="network-node">{alert_data['destination_ip']}</div>
709
+ </div>
710
+
711
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">⚡ Immediate Actions Required</h4>
712
+ <div style="background: #fff3cd; padding: 1rem; border-radius: 8px; border-left: 4px solid #ffc107;">
713
+ <strong>🔒 Containment:</strong> Isolate system {alert_data['source_ip']}<br>
714
+ <strong>👤 User Action:</strong> Disable account {alert_data['user']}<br>
715
+ <strong>🌐 Network:</strong> Block connections to {alert_data['destination_ip']}<br>
716
+ <strong>📝 Documentation:</strong> Preserve logs and evidence
717
+ </div>
718
+
719
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">📊 Threat Assessment</h4>
720
+ {confidence_meter}
721
+
722
+ <div style="background: #f8d7da; padding: 1rem; border-radius: 8px; border-left: 4px solid #dc3545; margin-top: 1rem;">
723
+ <strong>⬆️ Escalation Required:</strong> {alert_data['severity']} severity warrants L2 investigation
724
+ </div>
725
+ </div>
726
+ """,
727
 
728
+ "L2": f"""
729
+ <div class="alert-card">
730
+ <div class="section-header">
731
+ 🔍 L2 Investigation Analysis
732
+ </div>
733
+
734
+ <div style="margin: 1rem 0;">
735
+ <span class="{severity_class}">{alert_data['severity']} Severity</span>
736
+ <span class="mitre-tag" style="margin-left: 0.5rem;">{alert_data['mitre_tactic']}</span>
737
+ </div>
738
+
739
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">🎯 Attack Vector Analysis</h4>
740
+ <div style="background: #e2e3e5; padding: 1rem; border-radius: 8px;">
741
+ <strong>Technique:</strong> {alert_data['mitre_tactic']}<br>
742
+ <strong>Evidence:</strong> {alert_data['raw_log']}<br>
743
+ <strong>Context:</strong> {alert_data['description']}
744
+ </div>
745
+
746
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">🔬 Investigation Roadmap</h4>
747
+ <div class="timeline-item">
748
+ <strong>1.</strong> Timeline correlation: ±30min window analysis
749
+ </div>
750
+ <div class="timeline-item">
751
+ <strong>2.</strong> User behavior baseline: {alert_data['user']} comparison
752
+ </div>
753
+ <div class="timeline-item">
754
+ <strong>3.</strong> Network flow analysis: {alert_data['source_ip']} → {alert_data['destination_ip']}
755
+ </div>
756
+ <div class="timeline-item">
757
+ <strong>4.</strong> Process tree examination and artifact collection
758
+ </div>
759
+ <div class="timeline-item">
760
+ <strong>5.</strong> Similar IOC hunting across environment
761
+ </div>
762
+
763
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">📊 Threat Intelligence</h4>
764
+ {confidence_meter}
765
+ <div style="background: #d1ecf1; padding: 1rem; border-radius: 8px; border-left: 4px solid #17a2b8; margin-top: 1rem;">
766
+ <strong>Attribution Context:</strong> {alert_data['threat_intel']}
767
+ </div>
768
+ </div>
769
+ """,
770
 
771
+ "L3": f"""
772
+ <div class="alert-card">
773
+ <div class="section-header">
774
+ 🎯 L3 Expert Strategic Analysis
775
+ </div>
776
+
777
+ <div style="margin: 1rem 0;">
778
+ <span class="{severity_class}">{alert_data['severity']} Severity</span>
779
+ <span class="mitre-tag" style="margin-left: 0.5rem;">{alert_data['mitre_tactic']}</span>
780
+ </div>
781
+
782
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">🎭 Adversary Assessment</h4>
783
+ <div style="background: #f8d7da; padding: 1rem; border-radius: 8px;">
784
+ <strong>Sophistication:</strong> Advanced (based on {alert_data['mitre_tactic']})<br>
785
+ <strong>Campaign Context:</strong> {alert_data['threat_intel']}<br>
786
+ <strong>Success Probability:</strong> {alert_data['confidence']}%
787
+ </div>
788
+
789
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">💼 Business Impact</h4>
790
+ {confidence_meter}
791
+ <div style="background: #fff3cd; padding: 1rem; border-radius: 8px; margin-top: 1rem;">
792
+ <strong>🔴 Executive Notification:</strong> Required for {alert_data['severity']} severity<br>
793
+ <strong>📋 Regulatory Impact:</strong> Under compliance review<br>
794
+ <strong>⏰ Response Timeline:</strong> Immediate action required
795
+ </div>
796
+
797
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">🛡️ Strategic Response Plan</h4>
798
+ <div class="timeline-item" style="background: #d4edda;">
799
+ <strong>Immediate:</strong> Threat hunting deployment across infrastructure
800
+ </div>
801
+ <div class="timeline-item" style="background: #cce5ff;">
802
+ <strong>Short-term:</strong> Enhanced monitoring and detection rules
803
+ </div>
804
+ <div class="timeline-item" style="background: #e2e3e5;">
805
+ <strong>Medium-term:</strong> Security architecture review
806
+ </div>
807
+ <div class="timeline-item" style="background: #f8d7da;">
808
+ <strong>Long-term:</strong> Threat intelligence integration
809
+ </div>
810
+ </div>
811
+ """
 
812
  }
813
 
814
  return templates.get(analyst_level, templates["L2"])
815
 
816
+ def analyze_alert_beautiful(scenario_name, alert_index, analyst_level):
817
+ """Enhanced analysis function with beautiful output"""
818
  start_time = time.time()
819
 
820
  if scenario_name not in ATTACK_SCENARIOS:
 
831
  # Generate analysis
832
  analysis = generate_analysis_safe(selected_alert, analyst_level)
833
 
834
+ # Create beautiful alert details
835
+ severity_class = get_severity_class(selected_alert['severity'])
836
+ confidence_meter = create_confidence_meter(selected_alert['confidence'])
837
+
838
+ alert_details = f"""
839
+ <div class="alert-card">
840
+ <div style="display: flex; justify-content: space-between; align-items: center; margin-bottom: 1.5rem;">
841
+ <h3 style="margin: 0; color: #2d3436;">🎫 ALERT {selected_alert['id']}</h3>
842
+ <small style="color: #6c757d;">🕐 {selected_alert['timestamp']}</small>
843
+ </div>
844
+
845
+ <div class="network-flow" style="margin: 1rem 0;">
846
+ <div class="network-node">{selected_alert['source_ip']}</div>
847
+ <div class="network-arrow">→</div>
848
+ <div class="network-node">{selected_alert['destination_ip']}</div>
849
+ </div>
850
+
851
+ <div style="margin: 1rem 0;">
852
+ <strong>👤 User Account:</strong> {selected_alert['user']}<br>
853
+ <strong>🎯 Alert Type:</strong> {selected_alert['alert_type']}<br>
854
+ </div>
855
+
856
+ <div style="margin: 1.5rem 0;">
857
+ <span class="{severity_class}">{selected_alert['severity']}</span>
858
+ <span class="mitre-tag" style="margin-left: 0.5rem;">{selected_alert['mitre_tactic']}</span>
859
+ </div>
860
+
861
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">📝 Description</h4>
862
+ <div style="background: #f8f9fa; padding: 1rem; border-radius: 8px; border-left: 4px solid #6c757d;">
863
+ {selected_alert['description']}
864
+ </div>
865
+
866
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">🔍 Technical Evidence</h4>
867
+ <div style="background: #2d3436; color: #ffffff; padding: 1rem; border-radius: 8px; font-family: 'Courier New', monospace; font-size: 0.9rem; overflow-x: auto;">
868
+ {selected_alert['raw_log']}
869
+ </div>
870
+
871
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">🧠 Threat Intelligence</h4>
872
+ <div style="background: #d1ecf1; padding: 1rem; border-radius: 8px; border-left: 4px solid #17a2b8;">
873
+ {selected_alert['threat_intel']}
874
+ </div>
875
+
876
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">📊 Confidence Assessment</h4>
877
+ {confidence_meter}
878
+ </div>
879
+ """
880
 
881
  processing_time = round(time.time() - start_time, 2)
882
  device_info = "GPU" if torch.cuda.is_available() else "CPU"
883
+ status = f"""
884
+ <div class="status-success">
885
+ ✅ {analyst_level} analysis completed in {processing_time}s | Device: {device_info} | {model_status}
886
+ </div>
887
+ """
888
 
889
  return alert_details, analysis, status
890
 
891
+ def get_beautiful_scenario_info(scenario_name):
892
+ """Create beautiful scenario information display"""
893
  if scenario_name in ATTACK_SCENARIOS:
894
  scenario = ATTACK_SCENARIOS[scenario_name]
895
 
896
+ info = f"""
897
+ <div class="content-card">
898
+ <div class="section-header">
899
+ {scenario['icon']} Attack Scenario: {scenario_name}
900
+ </div>
901
+
902
+ <div style="margin: 1.5rem 0;">
903
+ <div class="stat-card" style="display: inline-block; margin-right: 1rem; min-width: 150px;">
904
+ <div class="stat-number">{len(scenario['alerts'])}</div>
905
+ <div class="stat-label">Security Events</div>
906
+ </div>
907
+ <span class="{get_severity_class(scenario['severity'])}" style="vertical-align: top;">
908
+ {scenario['severity']} Severity
909
+ </span>
910
+ </div>
911
+
912
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">📋 Scenario Description</h4>
913
+ <div style="background: #f8f9fa; padding: 1.5rem; border-radius: 12px; border-left: 5px solid {scenario.get('color', '#667eea')};">
914
+ {scenario['description']}
915
+ </div>
916
+
917
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">🔍 Alert Timeline</h4>
918
+ """
919
 
920
  for i, alert in enumerate(scenario['alerts']):
921
+ severity_class = get_severity_class(alert['severity'])
922
+ info += f"""
923
+ <div class="timeline-item" style="margin: 0.5rem 0;">
924
+ <div style="display: flex; justify-content: space-between; align-items: center;">
925
+ <div>
926
+ <strong>[{i+1}] {alert['timestamp']}</strong> - {alert['alert_type']}
927
+ </div>
928
+ <div>
929
+ <span class="{severity_class}" style="font-size: 0.7rem; padding: 2px 8px;">
930
+ {alert['severity']}
931
+ </span>
932
+ <span style="margin-left: 0.5rem; color: #6c757d; font-size: 0.8rem;">
933
+ {alert['confidence']}% confidence
934
+ </span>
935
+ </div>
936
+ </div>
937
+ </div>
938
+ """
939
 
940
  info += """
941
+ <h4 style="color: #2d3436; margin: 1.5rem 0 1rem 0;">🤖 AI Analysis Capabilities</h4>
942
+ <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 1rem; margin-top: 1rem;">
943
+ <div class="stat-card">
944
+ <div style="font-size: 1.5rem; margin-bottom: 0.5rem;">🧠</div>
945
+ <div class="stat-label">GPT-OSS-20B Reasoning</div>
946
+ </div>
947
+ <div class="stat-card">
948
+ <div style="font-size: 1.5rem; margin-bottom: 0.5rem;">⚡</div>
949
+ <div class="stat-label">Multi-Strategy Loading</div>
950
+ </div>
951
+ <div class="stat-card">
952
+ <div style="font-size: 1.5rem; margin-bottom: 0.5rem;">🛡️</div>
953
+ <div class="stat-label">Robust Error Handling</div>
954
+ </div>
955
+ <div class="stat-card">
956
+ <div style="font-size: 1.5rem; margin-bottom: 0.5rem;">🎯</div>
957
+ <div class="stat-label">Expert Templates</div>
958
+ </div>
959
+ </div>
960
+ </div>
961
+ """
962
 
963
  return info
964
+ return '<div class="status-warning">⚠️ No scenario selected. Please choose an attack scenario to begin analysis.</div>'
965
 
966
+ # Create beautiful Gradio interface
967
+ with gr.Blocks(title="SOC Assistant - Beautiful Edition", theme=gr.themes.Soft(), css=beautiful_css) as demo:
 
968
 
969
+ # Header
970
+ gr.HTML("""
971
+ <div class="header-container">
972
+ <div style="text-align: center;">
973
+ <h1 style="margin: 0; font-size: 2.5rem; background: linear-gradient(135deg, #667eea, #764ba2); -webkit-background-clip: text; -webkit-text-fill-color: transparent; font-weight: 700;">
974
+ 🛡️ SOC LLM Assistant
975
+ </h1>
976
+ <p style="margin: 0.5rem 0 0 0; font-size: 1.2rem; color: #6c757d; font-weight: 400;">
977
+ Beautiful Edition • Powered by GPT-OSS-20B
978
+ </p>
979
+ <div class="gpt-oss-badge" style="margin-top: 1rem;">
980
+ 🚀 Multi-Strategy AI Model Loading • Enhanced UI/UX
981
+ </div>
982
+ </div>
983
+ </div>
984
  """)
985
 
986
  # Model status display
987
+ status_display = gr.HTML('<div class="status-warning">🔄 Initializing AI models...</div>')
988
 
989
  with gr.Row():
990
+ # Left Panel - Controls
991
+ with gr.Column(scale=1, min_width=350):
992
+ gr.HTML('<div class="section-header">🎮 Attack Simulation Control</div>')
993
 
994
  scenario_dropdown = gr.Dropdown(
995
  choices=list(ATTACK_SCENARIOS.keys()),
996
  label="🎭 Select Attack Scenario",
997
  value="🔄 Lateral Movement",
998
+ interactive=True,
999
+ elem_classes=["custom-input"]
1000
  )
1001
 
1002
+ scenario_info = gr.HTML()
1003
 
1004
+ gr.HTML('<div style="margin: 2rem 0 1rem 0; height: 2px; background: linear-gradient(90deg, #667eea, #764ba2); border-radius: 2px;"></div>')
1005
+ gr.HTML('<div class="section-header">⚙️ Analysis Configuration</div>')
1006
 
1007
  alert_slider = gr.Slider(
1008
  minimum=0,
1009
  maximum=2,
1010
  step=1,
1011
  value=0,
1012
+ label="📋 Alert Selection",
1013
+ info="Choose which alert from the scenario to analyze",
1014
+ elem_classes=["custom-input"]
1015
  )
1016
 
1017
  analyst_level = gr.Radio(
1018
  choices=["L1", "L2", "L3"],
1019
+ label="👤 Analyst Experience Level",
1020
  value="L2",
1021
+ info="🔹 L1: Triage 🔹 L2: Investigation 🔹 L3: Expert Analysis",
1022
+ elem_classes=["custom-input"]
1023
  )
1024
 
1025
  analyze_btn = gr.Button(
1026
+ "🚀 Analyze Alert with AI",
1027
  variant="primary",
1028
+ size="lg",
1029
+ elem_classes=["primary-button"]
1030
  )
1031
 
1032
  init_btn = gr.Button(
1033
+ "🔄 Reinitialize Models",
1034
+ variant="secondary",
1035
+ elem_classes=["secondary-button"]
1036
  )
1037
 
1038
+ gr.HTML('<div style="margin: 2rem 0 1rem 0; height: 2px; background: linear-gradient(90deg, #667eea, #764ba2); border-radius: 2px;"></div>')
1039
+ gr.HTML("""
1040
+ <div class="content-card">
1041
+ <div class="section-header">🔧 System Features</div>
1042
+ <div style="margin-top: 1rem;">
1043
+ <div class="timeline-item" style="background: rgba(102, 126, 234, 0.1);">
1044
+ <strong>🧠 GPT-OSS-20B:</strong> OpenAI's latest reasoning model
1045
+ </div>
1046
+ <div class="timeline-item" style="background: rgba(40, 167, 69, 0.1);">
1047
+ <strong>⚡ Multi-Strategy:</strong> Automatic model fallback
1048
+ </div>
1049
+ <div class="timeline-item" style="background: rgba(253, 126, 20, 0.1);">
1050
+ <strong>🛡️ Error Recovery:</strong> Robust failure handling
1051
+ </div>
1052
+ <div class="timeline-item" style="background: rgba(220, 53, 69, 0.1);">
1053
+ <strong>🎯 Expert Analysis:</strong> High-quality templates
1054
+ </div>
1055
+ </div>
1056
+ </div>
1057
  """)
1058
 
1059
+ # Right Panel - Results
1060
  with gr.Column(scale=2):
1061
+ gr.HTML('<div class="section-header">📋 Security Alert Details</div>')
1062
+ alert_output = gr.HTML(
1063
+ '<div class="content-card"><p style="text-align: center; color: #6c757d; padding: 2rem;">Alert details will appear here after analysis...</p></div>'
 
 
1064
  )
1065
 
1066
+ gr.HTML('<div class="section-header">🤖 AI-Powered Security Analysis</div>')
1067
+ analysis_output = gr.HTML(
1068
+ '<div class="content-card"><p style="text-align: center; color: #6c757d; padding: 2rem;">AI analysis will appear here after processing...</p></div>'
 
 
1069
  )
1070
 
1071
+ status_output = gr.HTML()
 
 
 
 
1072
 
1073
+ # Footer
1074
+ gr.HTML("""
1075
+ <div class="content-card" style="margin-top: 2rem; text-align: center;">
1076
+ <h3 style="color: #2d3436; margin-bottom: 1rem;">🎯 Enhanced Features</h3>
1077
+ <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 1rem;">
1078
+ <div class="stat-card">
1079
+ <div style="font-size: 2rem; margin-bottom: 0.5rem;">🎨</div>
1080
+ <div class="stat-label">Beautiful Design</div>
1081
+ <p style="margin-top: 0.5rem; font-size: 0.8rem; color: #6c757d;">Modern, responsive UI with glassmorphism effects</p>
1082
+ </div>
1083
+ <div class="stat-card">
1084
+ <div style="font-size: 2rem; margin-bottom: 0.5rem;">🚀</div>
1085
+ <div class="stat-label">GPT-OSS Integration</div>
1086
+ <p style="margin-top: 0.5rem; font-size: 0.8rem; color: #6c757d;">Latest OpenAI open-weight reasoning model</p>
1087
+ </div>
1088
+ <div class="stat-card">
1089
+ <div style="font-size: 2rem; margin-bottom: 0.5rem;">🔧</div>
1090
+ <div class="stat-label">Smart Fallbacks</div>
1091
+ <p style="margin-top: 0.5rem; font-size: 0.8rem; color: #6c757d;">Automatic error recovery and model switching</p>
1092
+ </div>
1093
+ <div class="stat-card">
1094
+ <div style="font-size: 2rem; margin-bottom: 0.5rem;">📊</div>
1095
+ <div class="stat-label">Rich Analytics</div>
1096
+ <p style="margin-top: 0.5rem; font-size: 0.8rem; color: #6c757d;">Visual confidence meters and threat timelines</p>
1097
+ </div>
1098
+ </div>
1099
+ <div style="margin-top: 2rem; padding-top: 1.5rem; border-top: 1px solid #dee2e6; color: #6c757d;">
1100
+ <strong>👨‍🎓 Research:</strong> Abdullah Alanazi | <strong>🏛️ Institution:</strong> KAUST | <strong>👨‍🏫 Supervisor:</strong> Prof. Ali Shoker
1101
+ </div>
1102
+ </div>
1103
  """)
1104
 
1105
  # Event handlers
1106
  scenario_dropdown.change(
1107
+ fn=get_beautiful_scenario_info,
1108
  inputs=[scenario_dropdown],
1109
  outputs=[scenario_info]
1110
  )
 
1122
  )
1123
 
1124
  analyze_btn.click(
1125
+ fn=analyze_alert_beautiful,
1126
  inputs=[scenario_dropdown, alert_slider, analyst_level],
1127
  outputs=[alert_output, analysis_output, status_output]
1128
  )
 
1134
 
1135
  # Initialize on startup
1136
  demo.load(
1137
+ fn=get_beautiful_scenario_info,
1138
  inputs=[scenario_dropdown],
1139
  outputs=[scenario_info]
1140
  )
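
One detail worth noting in the new `generate_analysis_safe()`: when the transformers text-generation pipeline is called with chat-style messages, `generated_text` comes back as the full message list, so the assistant reply is the last entry. A small usage sketch of that indexing (the model id and prompt are placeholders, and loading GPT-OSS-20B requires suitable GPU memory):

```python
# Sketch: extracting the assistant reply from a chat-format pipeline result,
# matching the app's result[0]["generated_text"][-1]["content"] indexing.
from transformers import pipeline

pipe = pipeline("text-generation", model="openai/gpt-oss-20b",
                torch_dtype="auto", device_map="auto")
messages = [{"role": "user", "content": "Summarize this SOC alert in one sentence."}]
result = pipe(messages, max_new_tokens=64, do_sample=True,
              temperature=0.3, top_p=0.9, repetition_penalty=1.1)

# result[0]["generated_text"] == messages + [{"role": "assistant", "content": "..."}]
analysis = result[0]["generated_text"][-1]["content"]
print(analysis)
```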