import os
import re
import heapq
import concurrent.futures
from collections import Counter

import docx
import streamlit as st
from hazm import Normalizer
from rapidfuzz import fuzz
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage, HumanMessage
st.markdown("""
""", unsafe_allow_html=True)
st.markdown("""
""", unsafe_allow_html=True)
if "authenticated" not in st.session_state:
st.session_state.authenticated = False
if not st.session_state.authenticated:
st.markdown('', unsafe_allow_html=True)
st.markdown("""
""", unsafe_allow_html=True)
st.markdown("""
""", unsafe_allow_html=True)
username = st.text_input("نام کاربری:", placeholder="شناسه خود را وارد کنید",
label_visibility="visible")
password = st.text_input("رمز عبور:", placeholder="رمز عبور ", type="password",
label_visibility="visible")
st.markdown("""
""", unsafe_allow_html=True)
if st.button("ورود"):
if username == "admin" and password == "123":
st.session_state.authenticated = True
st.rerun()
else:
st.markdown("""
""", unsafe_allow_html=True)
else:
st.markdown("")
st.markdown("""
""", unsafe_allow_html=True)
folder_path = '46'
normalizer = Normalizer()

@st.cache_data(show_spinner="در حال پردازش اسناد... لطفاً صبور باشید.")
def load_and_process_documents(path):
    """Read every .docx file in `path`, normalize its text with hazm, and return {filename: text}."""
    def process_docx(filename):
        try:
            full_path = os.path.join(path, filename)
            doc = docx.Document(full_path)
            text = "\n".join([para.text for para in doc.paragraphs])
            normalized = normalizer.normalize(text)
            return filename, normalized
        except Exception as e:
            print(f"Error processing {filename}: {e}")
            return filename, ""

    filenames = [f for f in os.listdir(path) if f.endswith(".docx")]
    doc_texts = {}
    # .docx parsing is I/O-bound, so a thread pool speeds up loading many files.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for filename, content in executor.map(process_docx, filenames):
            doc_texts[filename] = content
    return doc_texts
doc_texts = load_and_process_documents(folder_path)
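# doc_texts maps each .docx filename to its normalized text, e.g. (illustrative, hypothetical file):
#   {"بخشنامه.docx": "متن نرمال‌شده‌ی سند ..."}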
# Persian stop words, one per line in stopwords.txt.
with open('stopwords.txt', 'r', encoding='utf-8') as file:
    stop_words = set(file.read().splitlines())

def remove_stop_words(text, stop_words):
    """Drop every stop word from a whitespace-tokenized string."""
    words = text.split()
    return " ".join([word for word in words if word not in stop_words])
def extract_keywords_from_text(text, query_words):
    """Return every line of `text` that contains at least one of the query words."""
    matched_lines = []
    lines = text.split("\n")
    for line in lines:
        if any(query_word in line for query_word in query_words):
            matched_lines.append(line)
    return matched_lines

def clean_text(text):
    # Keep only Persian letters, Persian and Latin digits, common Persian punctuation,
    # the + - * symbols, and spaces; everything else (Latin letters, HTML remnants, ...) is stripped.
    return re.sub(r'[^آ-ی۰-۹0-9،.؟!؛+\-* ]+', '', text)
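# Illustrative example:
#   clean_text("پاسخ <b>نهایی</b> 123")  ->  "پاسخ نهایی 123"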
def summarize_text_by_frequency(text, num_sentences=1):
    """Extractive summary: score each line by the summed frequency of its non-stop-word tokens
    and return the `num_sentences` highest-scoring lines."""
    sentences = text.split('\n')
    word_freq = Counter()
    for sentence in sentences:
        for word in sentence.split():
            if word not in stop_words:
                word_freq[word] += 1
    sentence_scores = {}
    for sentence in sentences:
        for word in sentence.split():
            if word in word_freq:
                sentence_scores[sentence] = sentence_scores.get(sentence, 0) + word_freq[word]
    summarized_sentences = heapq.nlargest(num_sentences, sentence_scores, key=sentence_scores.get)
    return "\n".join(summarized_sentences)
def find_closest_lines(query, doc_texts, stop_words, top_n=5):
    """Collect every document line containing a (non-stop-word) query term and return the
    `top_n` lines most similar to the query according to fuzz.partial_ratio."""
    cleaned_query = remove_stop_words(query, stop_words)
    query_words = cleaned_query.split()
    all_matched_lines = []
    for filename, text in doc_texts.items():
        matched_lines = extract_keywords_from_text(text, query_words)
        for line in matched_lines:
            similarity = fuzz.partial_ratio(query, line)
            all_matched_lines.append((line, similarity))
    all_matched_lines.sort(key=lambda x: x[1], reverse=True)
    closest_lines = [line for line, _ in all_matched_lines[:top_n]]
    return closest_lines
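# Illustrative usage (hypothetical query):
#   find_closest_lines("شرایط ثبت نام", doc_texts, stop_words, top_n=5)
# returns up to 5 document lines that share a query word, ordered by partial_ratio similarity.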
def remove_stop_words_from_lines(lines, stop_words):
    """Apply stop-word removal to each line in a list of lines."""
    cleaned_lines = []
    for line in lines:
        words = line.split()
        cleaned_words = [word for word in words if word not in stop_words]
        cleaned_lines.append(" ".join(cleaned_words))
    return cleaned_lines
# Question input (assumed; the original listing does not show where `query` is defined).
query = st.text_input("سوال خود را وارد کنید:")

if query:
    closest_lines = find_closest_lines(query, doc_texts, stop_words, top_n=10)

    # Remove stop words from the matched lines and clean out stray characters.
    cleaned_closest_lines = [
        clean_text(" ".join([word for word in line.split() if word not in stop_words]))
        for line in closest_lines
    ]

    # Frequency-based extractive summary of the cleaned lines.
    summarized_text = summarize_text_by_frequency("\n".join(cleaned_closest_lines), num_sentences=3)

    if summarized_text.strip():
        # Persian prompt: "Given the question and the summarized content below, write a polished,
        # precise final answer. Use only the text; if it lacks enough information, say so honestly,
        # fall back on your own knowledge, and state that you did so."
        prompt = f"""
لطفاً با توجه به سؤال زیر و محتوای خلاصه‌شده، یک پاسخ نهایی حرفه‌ای، دقیق و روان تولید کن. فقط از متن استفاده کن. اگر اطلاعات کافی در متن وجود ندارد، صادقانه اعلام کن، از دانش خودت کمک بگیر و بنویس که از دانش خودت استفاده کرده‌ای.

سوال:
{query}

خلاصه‌ی مرتبط:
{summarized_text}

پاسخ نهایی:
"""
        # LLM call (reconstructed; the original listing never shows where `response` comes from).
        # Requires OPENAI_API_KEY in the environment; the model name here is an assumption.
        llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
        response = llm([
            SystemMessage(content="تو یک دستیار فارسی‌زبان و دقیق هستی."),
            HumanMessage(content=prompt),
        ])

        rewritten = clean_text(response.content.strip())
        # Answer container (styling markup omitted from the original listing).
        st.markdown(f'{rewritten}', unsafe_allow_html=True)
    else:
        st.warning("هیچ محتوای خلاصه‌شده‌ای برای پاسخ وجود ندارد.")