Update app.py
app.py (changed)
@@ -105,20 +105,16 @@ st.markdown("""
 @st.cache_resource
 def get_pdf_index():
     with st.spinner('📄 در حال پردازش فایل PDF...'):
-        # Load the PDF file
         loader = [PyPDFLoader('test1.pdf')]
 
-
-        model_name =
-        model = SentenceTransformer(model_name)
+        model_name = "togethercomputer/m2-bert-80M-8k-retrieval"
+        model = SentenceTransformer(model_name, trust_remote_code=True)
 
-        # Split the texts into small chunks
         splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=0)
         texts = []
         for doc in loader:
             texts.extend(splitter.split_text(doc.page_content))
 
-        # Initialize the progress bar
         progress_bar = st.progress(0)
         total_docs = len(texts)
 
@@ -129,19 +125,15 @@ def get_pdf_index():
             batch_embeddings = model.encode(batch_texts, convert_to_numpy=True)
             embeddings.extend(batch_embeddings)
 
-            # Update the progress bar
             progress_bar.progress(min((i + batch_size) / total_docs, 1.0))
 
-        # Add a delay so the progress bar can complete
        time.sleep(1)
        progress_bar.empty()
 
-        # Build the index using FAISS
        embeddings = np.array(embeddings)
-        index = faiss.IndexFlatL2(embeddings.shape[1])
+        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings)
 
-        # Return the index
        return VectorstoreIndexCreator(
            embedding=model.encode,
            text_splitter=splitter