Spaces: Sleeping

Commit: Update app.py (Browse files)
File changed: app.py
--- app.py
+++ app.py
@@ -147,7 +147,7 @@ import torch
 device = "cpu"
 
 # Load models once
-embedding_model = SentenceTransformer("all-MiniLM-L6-v2-int8", device=device
+embedding_model = SentenceTransformer("all-MiniLM-L6-v2-int8", device=device)  # Removed normalize_embeddings
 summarizer = pipeline("summarization", model="facebook/bart-large-cnn", device=-1)
 
 # Load API Key
@@ -156,6 +156,7 @@ api_key = os.getenv("API_KEY")
 genai.configure(api_key=api_key)
 gemini_model = genai.GenerativeModel(model_name="gemini-2.0-flash")
 
+# Cache FAISS index & document texts
 @st.cache_resource
 def load_faiss_index():
     if not os.path.exists("faiss_index.idx") or not os.path.exists("doc_texts.npy"):
@@ -173,6 +174,8 @@ def query_legal_documents(query: str, top_k=3):
         return []
 
     query_embedding = embedding_model.encode([query])
+    faiss.normalize_L2(query_embedding)  # Normalize embeddings manually
+
     distances, indices = faiss_index.search(query_embedding, top_k)
 
     return [doc_texts[i] for i in indices[0] if i < len(doc_texts)]

(NOTE: the removed line at old 150 is reproduced exactly as the page rendered it — it appears truncated before its closing parenthesis; the commit's added comment suggests the original also passed a `normalize_embeddings` argument. Indentation inside the functions was stripped by the page scrape and has been restored to conventional 4-space Python indentation.)