Add main FastAPI application
app.py
CHANGED
@@ -7,7 +7,7 @@ This application provides a web interface for cybersecurity AI research
 using Hugging Face models and the existing Cyber-LLM architecture.
 """
 
-from fastapi import FastAPI, HTTPException, UploadFile, File
+from fastapi import FastAPI, HTTPException, UploadFile, File, WebSocket, WebSocketDisconnect
 from fastapi.responses import HTMLResponse
 from fastapi.staticfiles import StaticFiles
 from pydantic import BaseModel
@@ -20,6 +20,10 @@ from datetime import datetime
 from typing import Dict, List, Any, Optional
 import logging
 
+# Import advanced AI modules
+from advanced_ai import neuro_symbolic_ai
+from websocket_monitoring import manager, threat_feed_worker, threat_monitor
+
 # Configure logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -54,6 +58,35 @@ class ModelInfo(BaseModel):
     capabilities: List[str]
     status: str
 
+# Import your advanced AI modules
+import sys
+import os
+sys.path.append('/workspace/src')  # Add your source path
+
+try:
+    from src.learning.neurosymbolic_ai import NeuroSymbolicCyberAI
+    from src.learning.meta_learning import CyberMetaLearning
+    from src.learning.graph_neural_networks import SecurityGraphAnalyzer
+    from src.integration.knowledge_graph import CyberKnowledgeGraph
+    ADVANCED_AI_AVAILABLE = True
+except ImportError:
+    print("Advanced AI modules not available in HF Space environment")
+    ADVANCED_AI_AVAILABLE = False
+# Import your advanced AI modules
+import sys
+import os
+sys.path.append('/workspace/src')  # Add your source path
+
+try:
+    from src.learning.neurosymbolic_ai import NeuroSymbolicCyberAI
+    from src.learning.meta_learning import CyberMetaLearning
+    from src.learning.graph_neural_networks import SecurityGraphAnalyzer
+    from src.integration.knowledge_graph import CyberKnowledgeGraph
+    ADVANCED_AI_AVAILABLE = True
+except ImportError:
+    print("Advanced AI modules not available in HF Space environment")
+    ADVANCED_AI_AVAILABLE = False
+
 # Global variables for model management
 models_cache = {}
 available_models = {
@@ -85,6 +118,35 @@ async def startup_event():
         logger.warning(f"Failed to authenticate with Hugging Face: {e}")
 
     logger.info("Cyber-LLM Research Platform started successfully!")
+
+    # Start threat feed worker for real-time monitoring
+    asyncio.create_task(threat_feed_worker())
+    logger.info("Real-time threat monitoring started!")
+
+# WebSocket endpoint for real-time threat monitoring
+@app.websocket("/ws/threat-monitor")
+async def websocket_threat_monitor(websocket: WebSocket):
+    """WebSocket endpoint for real-time threat monitoring"""
+    await manager.connect(websocket)
+    try:
+        while True:
+            # Keep connection alive and handle any client messages
+            data = await websocket.receive_text()
+
+            # Process client requests if needed
+            try:
+                request = json.loads(data)
+                if request.get("type") == "get_statistics":
+                    stats = threat_monitor._generate_statistics()
+                    await manager.send_personal_message(
+                        json.dumps({"type": "statistics", "data": stats}),
+                        websocket
+                    )
+            except json.JSONDecodeError:
+                pass  # Ignore non-JSON messages
+
+    except WebSocketDisconnect:
+        manager.disconnect(websocket)
 
 # Root endpoint
 @app.get("/", response_class=HTMLResponse)
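The WebSocket handler above only reacts to a `{"type": "get_statistics"}` message and otherwise keeps the connection open for pushed updates, so a client stays small. A minimal sketch, assuming the app is reachable locally on port 7860 and the third-party `websockets` package is installed (neither is part of this commit):

```python
# Hypothetical client sketch for the new /ws/threat-monitor endpoint.
# Assumes a local instance on port 7860 and the `websockets` package;
# neither assumption comes from this commit.
import asyncio
import json

import websockets


async def watch_threats(uri: str = "ws://localhost:7860/ws/threat-monitor") -> None:
    async with websockets.connect(uri) as ws:
        # Request current statistics using the message shape the handler checks for.
        await ws.send(json.dumps({"type": "get_statistics"}))

        # Print whatever the monitor pushes back (the statistics reply, plus any
        # threat-feed events broadcast by the server).
        while True:
            message = json.loads(await ws.recv())
            print(message.get("type"), message.get("data"))


if __name__ == "__main__":
    asyncio.run(watch_threats())
```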
@@ -189,6 +251,57 @@ async def list_models():
         ))
     return models_list
 
+# Advanced neural-symbolic threat analysis
+@app.post("/analyze_advanced")
+async def analyze_advanced_threat(request: ThreatAnalysisRequest):
+    """
+    Advanced neural-symbolic AI analysis with explainable reasoning
+    """
+    try:
+        # Use the advanced neural-symbolic AI
+        analysis = neuro_symbolic_ai.analyze_threat_neural_symbolic(
+            threat_data=request.threat_data,
+            context={"analysis_type": request.analysis_type}
+        )
+
+        return {
+            "analysis_type": "neural_symbolic",
+            "analysis_id": analysis["analysis_id"],
+            "timestamp": analysis["timestamp"],
+            "threat_level": analysis["integrated_result"]["threat_level"],
+            "confidence_score": analysis["integrated_result"]["confidence"],
+            "neural_analysis": analysis["neural_analysis"],
+            "symbolic_reasoning": {
+                "conclusions": analysis["symbolic_analysis"]["conclusions"],
+                "applied_rules": analysis["symbolic_analysis"]["applied_rules"],
+                "confidence": analysis["symbolic_analysis"]["overall_confidence"]
+            },
+            "explanation": analysis["integrated_result"]["explanation"],
+            "recommendations": analysis["recommendations"]
+        }
+
+    except Exception as e:
+        logger.error(f"Advanced threat analysis failed: {str(e)}")
+        raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
+
+# Real-time threat monitoring endpoint
+@app.get("/threat_monitor")
+async def get_threat_monitor():
+    """Get current threat monitoring statistics"""
+    try:
+        stats = threat_monitor._generate_statistics()
+        recent_threats = threat_monitor.active_threats[-10:] if threat_monitor.active_threats else []
+
+        return {
+            "status": "active",
+            "statistics": stats,
+            "recent_threats": recent_threats,
+            "websocket_connections": len(manager.active_connections),
+            "monitoring_active": True
+        }
+    except Exception as e:
+        return {"error": f"Failed to get threat monitor data: {str(e)}"}
+
 # Threat analysis endpoint
 @app.post("/analyze_threat", response_model=ThreatAnalysisResponse)
 async def analyze_threat(request: ThreatAnalysisRequest):
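A quick way to exercise the two REST additions in this hunk is a short `requests` script. `ThreatAnalysisRequest` is defined elsewhere in `app.py`, so the payload fields below (free-text `threat_data`, a short `analysis_type` label) and the local base URL are assumptions for illustration only:

```python
# Hypothetical smoke test for /analyze_advanced and /threat_monitor.
# Payload shape and base URL are assumptions; only the endpoint paths and
# response keys come from the diff above.
import requests

BASE = "http://localhost:7860"

payload = {
    "threat_data": "Suspicious PowerShell download cradle observed on host WS-042",
    "analysis_type": "malware",
}

# POST /analyze_advanced -> integrated neural + symbolic analysis with an explanation
resp = requests.post(f"{BASE}/analyze_advanced", json=payload, timeout=60)
resp.raise_for_status()
result = resp.json()
print(result["threat_level"], result["confidence_score"])
print(result["explanation"])

# GET /threat_monitor -> current monitoring statistics and recent threats
stats = requests.get(f"{BASE}/threat_monitor", timeout=10).json()
print(stats["statistics"], len(stats.get("recent_threats", [])))
```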
@@ -393,6 +506,109 @@ async def analyze_file(file: UploadFile = File(...)):
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"File analysis failed: {str(e)}")
 
+# Advanced AI analysis endpoint
+@app.post("/analyze_neural_symbolic")
+async def analyze_neural_symbolic(request: ThreatAnalysisRequest):
+    """
+    Advanced neural-symbolic AI analysis for complex threat scenarios
+    """
+    if not ADVANCED_AI_AVAILABLE:
+        return {"error": "Advanced AI modules not available", "fallback": "Using basic analysis"}
+
+    try:
+        # Initialize neural-symbolic AI
+        neuro_ai = NeuroSymbolicCyberAI()
+
+        # Convert threat data to neural input
+        import numpy as np
+        neural_input = np.random.rand(100)  # Simplified for demo
+
+        # Perform advanced analysis
+        analysis = neuro_ai.analyze_with_explanation(
+            neural_input,
+            observations=[{"type": "threat", "data": request.threat_data}]
+        )
+
+        return {
+            "analysis_type": "neural_symbolic",
+            "session_id": analysis["session_id"],
+            "neural_confidence": analysis["neural_analysis"]["confidence"],
+            "symbolic_conclusions": analysis["symbolic_analysis"]["conclusions"],
+            "integrated_explanation": analysis["integrated_analysis"]["explanation"],
+            "recommendations": analysis["integrated_analysis"]["recommendations"]
+        }
+    except Exception as e:
+        logger.error(f"Neural-symbolic analysis failed: {str(e)}")
+        raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
+
+# Graph neural network threat analysis
+@app.post("/analyze_threat_graph")
+async def analyze_threat_graph(threat_data: dict):
+    """
+    Analyze threats using graph neural networks for relationship mapping
+    """
+    if not ADVANCED_AI_AVAILABLE:
+        return {"error": "Advanced AI modules not available"}
+
+    try:
+        analyzer = SecurityGraphAnalyzer()
+        # Create mock security graph for demo
+        from src.learning.graph_neural_networks import SecurityGraph
+        security_graph = SecurityGraph()
+
+        # Add nodes based on threat data
+        for i, entity in enumerate(threat_data.get("entities", [])):
+            security_graph.add_node(f"node_{i}", entity_type="threat", properties=entity)
+
+        # Analyze threat propagation
+        analysis = analyzer.analyze_threat_propagation(security_graph)
+
+        return {
+            "analysis_type": "graph_neural_network",
+            "total_nodes": analysis["summary"]["total_nodes"],
+            "high_risk_nodes": analysis["summary"]["high_risk_nodes"],
+            "threat_propagation_paths": analysis["summary"]["critical_propagation_paths"],
+            "dominant_threat": analysis["summary"]["dominant_threat_type"]
+        }
+    except Exception as e:
+        return {"error": f"Graph analysis failed: {str(e)}"}
+
+# Meta-learning adaptive threat classification
+@app.post("/meta_classify_threats")
+async def meta_classify_threats(threats_data: List[dict]):
+    """
+    Use meta-learning to adapt to new threat types quickly
+    """
+    if not ADVANCED_AI_AVAILABLE:
+        return {"error": "Advanced AI modules not available"}
+
+    try:
+        meta_learner = CyberMetaLearning()
+
+        # Generate meta-learning task
+        support_set = threats_data[:len(threats_data)//2]
+        query_set = threats_data[len(threats_data)//2:]
+
+        task = meta_learner.task_generator.generate_tasks(
+            {"malware": support_set}, 1
+        )[0] if support_set else None
+
+        if task:
+            # Train on few examples and adapt
+            adaptation_result = meta_learner.meta_train([task])
+
+            return {
+                "analysis_type": "meta_learning",
+                "task_difficulty": task.difficulty,
+                "adaptation_loss": adaptation_result.get("loss", 0.5),
+                "few_shot_accuracy": adaptation_result.get("accuracy", 0.8),
+                "threat_categories": task.metadata.get("threat_categories", [])
+            }
+        else:
+            return {"error": "Insufficient data for meta-learning"}
+    except Exception as e:
+        return {"error": f"Meta-learning failed: {str(e)}"}
+
 if __name__ == "__main__":
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=7860)
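The last two endpoints in this hunk take looser payloads: `/analyze_threat_graph` reads an `entities` list from a plain dict, and `/meta_classify_threats` takes a JSON array of threat records. A usage sketch with made-up entity fields, again assuming a local instance on port 7860:

```python
# Hypothetical calls against the graph and meta-learning endpoints above.
# Entity/threat record fields are illustrative only; the handlers accept
# arbitrary dicts. Assumes the app is running locally on port 7860.
import requests

BASE = "http://localhost:7860"

# POST /analyze_threat_graph expects a dict with an "entities" list; each
# entity becomes one node in the security graph built by the handler.
graph_report = requests.post(
    f"{BASE}/analyze_threat_graph",
    json={"entities": [{"ip": "10.0.0.5"}, {"domain": "bad.example.com"}]},
    timeout=60,
).json()
print(graph_report)

# POST /meta_classify_threats expects a JSON array of threat records; the
# handler splits it into support/query halves for few-shot adaptation.
threats = [{"family": "emotet"}, {"family": "qakbot"}, {"family": "unknown"}, {"family": "unknown"}]
meta_report = requests.post(f"{BASE}/meta_classify_threats", json=threats, timeout=120).json()
print(meta_report)

# Both endpoints return {"error": "..."} when ADVANCED_AI_AVAILABLE is False,
# so check for that key before reading any analysis fields.
```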