import os
import time
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document
from langchain.chains import RetrievalQA
from langchain_core.retrievers import BaseRetriever
from langchain_core.prompts import PromptTemplate
from typing import Any, List
from pydantic import Field
import numpy as np
from sentence_transformers import SentenceTransformer
import faiss
# ----------------- Page settings -----------------
st.set_page_config(page_title="چت بات توانا", page_icon="🪖", layout="wide")
st.markdown("""
<style>
    @import url('https://fonts.googleapis.com/css2?family=Vazirmatn:wght@400;700&display=swap');
    html, body, [class*="css"] {
        font-family: 'Vazirmatn', Tahoma, sans-serif;
        direction: rtl;
        text-align: right;
    }
    .stApp {
        /* NOTE: Streamlit does not serve arbitrary local files referenced from CSS;
           this relative path may need a data URI or the static/ folder to actually load. */
        background: url("./military_bg.jpeg") no-repeat center center fixed;
        background-size: cover;
        backdrop-filter: blur(2px);
    }
    .stChatMessage {
        background-color: rgba(255,255,255,0.8);
        border: 1px solid #4e8a3e;
        border-radius: 12px;
        padding: 16px;
        margin-bottom: 15px;
        box-shadow: 0 4px 10px rgba(0,0,0,0.2);
        animation: fadeIn 0.4s ease-in-out;
    }
    .stTextInput > div > input, .stTextArea textarea {
        background-color: rgba(255,255,255,0.9) !important;
        border-radius: 8px !important;
        direction: rtl;
        text-align: right;
        font-family: 'Vazirmatn', Tahoma;
    }
    .stButton>button {
        background-color: #4e8a3e !important;
        color: white !important;
        font-weight: bold;
        border-radius: 10px;
        padding: 8px 20px;
        transition: 0.3s;
    }
    .stButton>button:hover {
        background-color: #3c6d30 !important;
    }
    .header-text {
        text-align: center;
        margin-top: 20px;
        margin-bottom: 40px;
        background-color: rgba(255, 255, 255, 0.75);
        padding: 20px;
        border-radius: 20px;
        box-shadow: 0 4px 12px rgba(0,0,0,0.2);
    }
    .header-text h1 {
        font-size: 42px;
        color: #2c3e50;
        margin: 0;
        font-weight: bold;
    }
    .subtitle {
        font-size: 18px;
        color: #34495e;
        margin-top: 8px;
    }
    @keyframes fadeIn {
        from { opacity: 0; transform: translateY(10px); }
        to { opacity: 1; transform: translateY(0); }
    }
</style>
""", unsafe_allow_html=True)
col1, col2, col3 = st.columns([1, 0.2, 1])
with col2:
    st.image("army.png", width=240)
st.markdown("""
<div class="header-text">
<h1>چت بات توانا</h1>
<div class="subtitle">دستیار هوشمند برای تصمیمگیری در میدان نبرد</div>
</div>
""", unsafe_allow_html=True)
# ----------------- Load the PDF and build the index -----------------
@st.cache_resource
def get_pdf_index():
    with st.spinner('📄 در حال پردازش فایل PDF...'):
        loader = PyPDFLoader('test1.pdf')
        documents = loader.load()
        splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=128)
        texts = []
        for doc in documents:
            texts.extend(splitter.split_text(doc.page_content))
        # Embed every chunk with the sentence-transformers model and add the vectors to a
        # flat L2 FAISS index; return everything the retriever below needs.
        model = SentenceTransformer("togethercomputer/m2-bert-80M-8k-retrieval", trust_remote_code=True)
        embeddings = np.asarray(model.encode(texts), dtype="float32")
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings)
        docs = [Document(page_content=text) for text in texts]
        return docs, embeddings, index, model
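
# SimpleRetriever is used below but is not defined anywhere in this file. The class here
# is a minimal sketch of such a retriever, assuming the intent was to query the FAISS
# index built in get_pdf_index(): it embeds the question with the same
# sentence-transformers model and returns the k nearest chunks as Documents.
class SimpleRetriever(BaseRetriever):
    documents: List[Document] = Field(default_factory=list)
    embeddings: Any = None
    index: Any = None
    model: Any = None
    k: int = 4

    def _get_relevant_documents(self, query: str, *, run_manager=None) -> List[Document]:
        # Embed the query and look up the k closest chunks in the FAISS index
        query_vector = np.asarray(self.model.encode([query]), dtype="float32")
        _, neighbors = self.index.search(query_vector, self.k)
        return [self.documents[i] for i in neighbors[0] if 0 <= i < len(self.documents)]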
# ----------------- Load data and build the retriever -----------------
documents, embeddings, index, model = get_pdf_index()
retriever = SimpleRetriever(
    documents=documents,
    embeddings=embeddings,
    index=index,
    model=model
)
# ----------------- Define the LLM -----------------
llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    # Read the Together API key from the environment instead of hardcoding a secret;
    # the TOGETHER_API_KEY variable name is an assumption.
    api_key=os.environ.get("TOGETHER_API_KEY"),
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free"
)
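
# custom_prompt is passed to the RetrievalQA chain below but was never defined in this
# file. The template here is an assumed minimal prompt for a "stuff" chain, which is
# formatted with the retrieved {context} and the user's {question}.
custom_prompt = PromptTemplate(
    template=(
        "Answer the question using only the context below. "
        "If the answer is not in the context, say that the information is not available.\n\n"
        "Context:\n{context}\n\nQuestion: {question}\nAnswer:"
    ),
    input_variables=["context", "question"],
)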
# ----------------- Build the QA chain -----------------
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    chain_type="stuff",
    chain_type_kwargs={"prompt": custom_prompt}
)
# ----------------- Chat state -----------------
if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'pending_prompt' not in st.session_state:
    st.session_state.pending_prompt = None
# ----------------- Show previous messages -----------------
for msg in st.session_state.messages:
    with st.chat_message(msg['role']):
        st.markdown(f"🗨️ {msg['content']}", unsafe_allow_html=True)
# ----------------- User input -----------------
prompt = st.chat_input("سوالی در مورد فایل بپرس...")
if prompt:
    st.session_state.messages.append({'role': 'user', 'content': prompt})
    st.session_state.pending_prompt = prompt
    st.rerun()
# ----------------- Model response -----------------
if st.session_state.pending_prompt:
    with st.chat_message('ai'):
        thinking = st.empty()
        thinking.markdown("🤖 در حال فکر کردن...")
        try:
            response = qa_chain.run(st.session_state.pending_prompt)
            if not response.strip():
                # Fall back to a fixed message when the chain returns an empty answer
                response = "متاسفانه اطلاعات دقیقی برای پاسخ به این سوال موجود نیست."
            else:
                response = response.strip()
        except Exception:
            # Any failure in retrieval or generation falls back to the same apology message
            response = "متاسفانه اطلاعات لازم برای پاسخ به این سوال موجود نیست."
        thinking.empty()
        # Stream the answer word by word for a typing effect
        full_response = ""
        placeholder = st.empty()
        for word in response.split():
            full_response += word + " "
            placeholder.markdown(full_response + "▌")
            time.sleep(0.03)
        placeholder.markdown(full_response)
        st.session_state.messages.append({'role': 'ai', 'content': full_response})
        st.session_state.pending_prompt = None