from langchain.chains import RetrievalQA
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline

from modules import parser, vectorizer

def run_analysis(uploaded_files, text_input, query, quick_action, temperature, start_time, end_time):
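    """Run a retrieval-augmented query over user-supplied logs.

    Logs come from uploaded files and/or pasted text; the question comes
    from a free-form query or a quick-action preset. Returns a tuple of
    (answer, bar_data, pie_data, alerts). start_time and end_time are
    accepted but not yet applied as a filter.
    """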
    logs_text = ""

    # Gather log text from uploaded files and/or the pasted text box.
    if uploaded_files:
        logs_text += parser.parse_uploaded_files(uploaded_files)

    if text_input:
        logs_text += "\n" + text_input

    if not logs_text.strip():
        return "❌ No logs provided.", None, None, None

    # An explicit query takes precedence over a quick-action preset.
    query_text = query if query else quick_action
    if not query_text:
        return "❌ No query provided.", None, None, None

    # Split the logs into chunks and index them for similarity search.
    docs = vectorizer.prepare_documents(logs_text)
    vectordb = vectorizer.create_vectorstore(docs)
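    # Note: create_vectorstore must return a LangChain-compatible vector
    # store, since it is queried through .as_retriever() below.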

    # do_sample=True is required for the temperature setting to take effect;
    # max_new_tokens bounds the generated text itself rather than
    # prompt + completion, which is what max_length would count.
    pipe = pipeline("text-generation", model="gpt2", max_new_tokens=512,
                    do_sample=True, temperature=temperature)
    llm = HuggingFacePipeline(pipeline=pipe)

    # from_chain_type defaults to the "stuff" chain, which places the
    # retrieved chunks directly into the prompt.
    qa = RetrievalQA.from_chain_type(llm=llm, retriever=vectordb.as_retriever())
    result = qa.invoke({"query": query_text})["result"]

    # Hardcoded demo values for the dashboard charts and alert list; a full
    # implementation would derive these from the parsed logs (and honor the
    # start_time/end_time window).
    bar_data = {"Hour": ["14:00", "15:00"], "Count": [8, 4]}
    pie_data = {"Event Type": ["Blocked", "Scan"], "Count": [8, 4]}
    alerts = [("CRITICAL", "8 blocked SSH attempts from 192.168.1.5"),
              ("WARNING", "4 port scanning alerts from 10.0.0.8")]

    return result, bar_data, pie_data, alerts
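

# Minimal usage sketch, illustrative only: the sample log lines, query, and
# parameter values below are hypothetical, and the modules.parser /
# modules.vectorizer helpers must be importable for this to run.
if __name__ == "__main__":
    sample_logs = (
        "Oct 12 14:02:11 host sshd[311]: Failed password for root from 192.168.1.5\n"
        "Oct 12 15:10:42 host snort[87]: port scan detected from 10.0.0.8"
    )
    answer, bars, pie, alerts = run_analysis(
        uploaded_files=None,
        text_input=sample_logs,
        query="Summarize suspicious activity in these logs",
        quick_action=None,
        temperature=0.7,
        start_time=None,
        end_time=None,
    )
    print(answer)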