Update search_utils.py
search_utils.py  +145 -107
CHANGED
@@ -80,123 +80,124 @@ class SemanticSearch:
         self.index_shards = []
         self.metadata_mgr = MetadataManager()
         self.shard_sizes = []
-
-
         self.logger = logging.getLogger("SemanticSearch")
         self.logger.info("Initializing SemanticSearch")
-
     @st.cache_resource
     def load_model(_self):
         return SentenceTransformer('all-MiniLM-L6-v2')
-
     def initialize_system(self):
         self.logger.info("Loading sentence transformer model")
         start_time = time.time()
         self.model = self.load_model()
         self.logger.info(f"Model loaded in {time.time() - start_time:.2f} seconds")
         self.logger.info("Loading FAISS indices")
         self._load_faiss_shards()
-
     def _load_faiss_shards(self):
-        """Load FAISS shards
         self.logger.info(f"Searching for index files in {self.shard_dir}")
         if not self.shard_dir.exists():
             self.logger.error(f"Shard directory not found: {self.shard_dir}")
             return
-
         self.logger.info(f"Found {len(index_files)} index files")
-
         self.shard_sizes = []
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
         self.total_vectors = sum(self.shard_sizes)
         self.logger.info(f"Total loaded vectors: {self.total_vectors} across {len(self.index_shards)} shards")
-        self.cumulative_offsets = np.cumsum([0] + self.shard_sizes)
-
-    def _load_single_index(self, shard_path):
-        """Load a single FAISS index shard."""
-        self.logger.info(f"Loading index: {shard_path}")
-        start_time = time.time()
-        file_size_mb = os.path.getsize(shard_path) / (1024 * 1024)
-        self.logger.info(f"Index file size: {file_size_mb:.2f} MB")
-        index = faiss.read_index(str(shard_path))
-        size = index.ntotal
-        self.logger.info(f"Index loaded in {time.time() - start_time:.2f} seconds")
-        return index, size
 
     def _global_index(self, shard_idx, local_idx):
-        """Convert
-
-
-
-        if local_idx < 0 or local_idx >= self.shard_sizes[shard_idx]:
-            self.logger.warning(f"Local index {local_idx} may be out of bounds for shard {shard_idx}")
-        return int(self.cumulative_offsets[shard_idx] + local_idx)
-
     def search(self, query, top_k=5):
-        """Search
         self.logger.info(f"Searching for query: '{query}' (top_k={top_k})")
         start_time = time.time()
         if not query:
             self.logger.warning("Empty query provided")
             return pd.DataFrame()
         if not self.index_shards:
             self.logger.error("No index shards loaded")
             return pd.DataFrame()
         try:
             self.logger.info("Encoding query")
             query_embedding = self.model.encode([query], convert_to_numpy=True)
-            # Normalize query embedding for proper cosine similarity comparison
-            query_embedding = query_embedding / np.linalg.norm(query_embedding, axis=1, keepdims=True)
             self.logger.debug(f"Query encoded to shape {query_embedding.shape}")
         except Exception as e:
             self.logger.error(f"Query encoding failed: {str(e)}")
             return pd.DataFrame()
-
         all_distances = []
         all_global_indices = []
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
         self.logger.info(f"Search found {len(all_global_indices)} results across all shards")
 
-        #
-
-
-
-
-
-        top_distances = np.array(sorted_distances[:top_k])
-        top_indices = np.array(sorted_indices[:top_k])
 
-        results = self._process_results(top_distances, top_indices, top_k)
         self.logger.info(f"Search completed in {time.time() - start_time:.2f} seconds with {len(results)} final results")
         return results
 
@@ -285,40 +286,77 @@ class SemanticSearch:
             self.logger.error(f"Result processing failed: {str(e)}", exc_info=True)
             return pd.DataFrame(columns=["title", "summary", "source", "authors", "similarity"])
 
-
-
-
-
-
-
-
-
-        if initial_results.empty:
-            return initial_results
-
-        # Filter by similarity threshold
-        filtered_results = initial_results[initial_results['similarity'] >= similarity_threshold]
-
-        # Return top-k of filtered results
-        return filtered_results.head(top_k).reset_index(drop=True)
 
-
-
-
-
-        """
-        # Get more results initially to determine distribution
-        initial_results = self.search(query, top_k=top_k*3)
 
-
-
 
-
-
-
-
-
-
-
-
-
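The removed _global_index converted a shard-local hit into a global row id by adding a precomputed offset from self.cumulative_offsets; the updated version shown below recomputes the prefix sum of self.shard_sizes on every call instead. A minimal sketch, with hypothetical shard sizes (not taken from the commit), checking that the two mappings agree:

import numpy as np

# Hypothetical shard sizes for illustration only.
shard_sizes = [1000, 2500, 800]
cumulative_offsets = np.cumsum([0] + shard_sizes)

def global_index_old(shard_idx, local_idx):
    # Removed approach: O(1) lookup into precomputed offsets.
    return int(cumulative_offsets[shard_idx] + local_idx)

def global_index_new(shard_idx, local_idx):
    # New approach: prefix sum recomputed per call.
    return sum(shard_sizes[:shard_idx]) + local_idx

assert global_index_old(2, 7) == global_index_new(2, 7) == 3507

The precomputed offsets give O(1) lookups but have to be rebuilt whenever the shards change; the per-call prefix sum costs O(number of shards), which is negligible for a handful of shards.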
         self.index_shards = []
         self.metadata_mgr = MetadataManager()
         self.shard_sizes = []
+
+        # Configure search logger
         self.logger = logging.getLogger("SemanticSearch")
         self.logger.info("Initializing SemanticSearch")
+
     @st.cache_resource
     def load_model(_self):
         return SentenceTransformer('all-MiniLM-L6-v2')
+
     def initialize_system(self):
         self.logger.info("Loading sentence transformer model")
         start_time = time.time()
         self.model = self.load_model()
         self.logger.info(f"Model loaded in {time.time() - start_time:.2f} seconds")
+
         self.logger.info("Loading FAISS indices")
         self._load_faiss_shards()
+
     def _load_faiss_shards(self):
+        """Load all FAISS index shards"""
         self.logger.info(f"Searching for index files in {self.shard_dir}")
+
         if not self.shard_dir.exists():
             self.logger.error(f"Shard directory not found: {self.shard_dir}")
             return
+
+        index_files = list(self.shard_dir.glob("*.index"))
         self.logger.info(f"Found {len(index_files)} index files")
+
         self.shard_sizes = []
+        self.index_shards = []
+
+        for shard_path in sorted(index_files):
+            try:
+                self.logger.info(f"Loading index: {shard_path}")
+                start_time = time.time()
+
+                # Log file size
+                file_size_mb = os.path.getsize(shard_path) / (1024 * 1024)
+                self.logger.info(f"Index file size: {file_size_mb:.2f} MB")
+
+                index = faiss.read_index(str(shard_path))
+                self.index_shards.append(index)
+                self.shard_sizes.append(index.ntotal)
+
+                self.logger.info(f"Loaded index with {index.ntotal} vectors in {time.time() - start_time:.2f} seconds")
+            except Exception as e:
+                self.logger.error(f"Failed to load index {shard_path}: {str(e)}")
+
         self.total_vectors = sum(self.shard_sizes)
         self.logger.info(f"Total loaded vectors: {self.total_vectors} across {len(self.index_shards)} shards")
 
     def _global_index(self, shard_idx, local_idx):
+        """Convert local index to global index"""
+        return sum(self.shard_sizes[:shard_idx]) + local_idx
+
+
     def search(self, query, top_k=5):
+        """Search with validation"""
         self.logger.info(f"Searching for query: '{query}' (top_k={top_k})")
         start_time = time.time()
+
         if not query:
             self.logger.warning("Empty query provided")
             return pd.DataFrame()
+
         if not self.index_shards:
             self.logger.error("No index shards loaded")
             return pd.DataFrame()
+
         try:
             self.logger.info("Encoding query")
             query_embedding = self.model.encode([query], convert_to_numpy=True)
             self.logger.debug(f"Query encoded to shape {query_embedding.shape}")
         except Exception as e:
             self.logger.error(f"Query encoding failed: {str(e)}")
             return pd.DataFrame()
+
         all_distances = []
         all_global_indices = []
+
+        # Search with index validation
+        self.logger.info(f"Searching across {len(self.index_shards)} shards")
+        for shard_idx, index in enumerate(self.index_shards):
+            if index.ntotal == 0:
+                self.logger.warning(f"Skipping empty shard {shard_idx}")
+                continue
+
+            try:
+                shard_start = time.time()
+                distances, indices = index.search(query_embedding, top_k)
+
+                valid_mask = (indices[0] >= 0) & (indices[0] < index.ntotal)
+                valid_indices = indices[0][valid_mask].tolist()
+                valid_distances = distances[0][valid_mask].tolist()
+
+                if len(valid_indices) != top_k:
+                    self.logger.debug(f"Shard {shard_idx}: Found {len(valid_indices)} valid results out of {top_k}")
+
+                global_indices = [self._global_index(shard_idx, idx) for idx in valid_indices]
+
+                all_distances.extend(valid_distances)
+                all_global_indices.extend(global_indices)
+
+                self.logger.debug(f"Shard {shard_idx} search completed in {time.time() - shard_start:.3f}s")
+            except Exception as e:
+                self.logger.error(f"Search failed in shard {shard_idx}: {str(e)}")
+                continue
+
         self.logger.info(f"Search found {len(all_global_indices)} results across all shards")
 
+        # Process results
+        results = self._process_results(
+            np.array(all_distances),
+            np.array(all_global_indices),
+            top_k
+        )
 
         self.logger.info(f"Search completed in {time.time() - start_time:.2f} seconds with {len(results)} final results")
         return results
 
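The rewritten search above asks each shard for its own top_k neighbours, masks out FAISS's -1 padding for missing hits, maps the surviving local ids to global ids, and hands the merged distance and id lists to _process_results. A self-contained sketch of that per-shard search-and-merge pattern, using two small random shards (the second deliberately smaller than top_k so the padding mask matters) rather than the app's real indices and metadata:

import numpy as np
import faiss

dim, top_k = 32, 5
rng = np.random.default_rng(0)

# Two hypothetical shards; the second has fewer vectors than top_k,
# so FAISS pads its result row with id -1 for the missing neighbours.
shards = []
for n in (100, 3):
    index = faiss.IndexFlatL2(dim)
    index.add(rng.random((n, dim), dtype=np.float32))
    shards.append(index)
shard_sizes = [ix.ntotal for ix in shards]

query = rng.random((1, dim), dtype=np.float32)

all_distances, all_global_indices = [], []
for shard_idx, index in enumerate(shards):
    distances, indices = index.search(query, top_k)
    valid_mask = (indices[0] >= 0) & (indices[0] < index.ntotal)   # drop -1 padding
    offset = sum(shard_sizes[:shard_idx])                          # same mapping as _global_index
    all_distances.extend(distances[0][valid_mask].tolist())
    all_global_indices.extend((indices[0][valid_mask] + offset).tolist())

# Merge: keep the overall top_k by smallest L2 distance.
order = np.argsort(all_distances)[:top_k]
print([(all_global_indices[i], round(all_distances[i], 3)) for i in order])

In the class itself the final ranking is deferred to _process_results, which sorts by the derived similarity column rather than by raw distance, but the merge step is the same.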
             self.logger.error(f"Result processing failed: {str(e)}", exc_info=True)
             return pd.DataFrame(columns=["title", "summary", "source", "authors", "similarity"])
 
+
+
+
+
+
+    def _process_results(self, distances, global_indices, top_k):
+        """Process raw search results into formatted DataFrame"""
+        process_start = time.time()
 
+        # Proper numpy array emptiness checks
+        if global_indices.size == 0 or distances.size == 0:
+            self.logger.warning("No search results to process")
+            return pd.DataFrame(columns=["title", "summary", "source", "authors", "similarity"])
 
+        try:
+            # Get metadata for matched indices
+            self.logger.info(f"Retrieving metadata for {len(global_indices)} indices")
+            metadata_start = time.time()
+            results = self.metadata_mgr.get_metadata(global_indices)
+            self.logger.info(f"Metadata retrieved in {time.time() - metadata_start:.2f}s, got {len(results)} records")
 
+            # Empty results check
+            if len(results) == 0:
+                self.logger.warning("No metadata found for indices")
+                return pd.DataFrame(columns=["title", "summary", "source", "authors", "similarity"])
+
+            # Ensure distances match results length
+            if len(results) != len(distances):
+                self.logger.warning(f"Mismatch between distances ({len(distances)}) and results ({len(results)})")
+
+                if len(results) < len(distances):
+                    self.logger.info("Truncating distances array to match results length")
+                    distances = distances[:len(results)]
+                else:
+                    # Should not happen but handle it anyway
+                    self.logger.error("More results than distances - this shouldn't happen")
+                    distances = np.pad(distances, (0, len(results) - len(distances)), 'constant', constant_values=1.0)
+
+            # Calculate similarity scores
+            self.logger.debug("Calculating similarity scores")
+            results['similarity'] = 1 - (distances / 2)
+
+            # Log similarity statistics
+            if not results.empty:
+                self.logger.debug(f"Similarity stats: min={results['similarity'].min():.3f}, " +
+                                  f"max={results['similarity'].max():.3f}, " +
+                                  f"mean={results['similarity'].mean():.3f}")
+
+
+            # Deduplicate and sort results
+            pre_dedup = len(results)
+            results = results.drop_duplicates(subset=["title", "source"]).sort_values("similarity", ascending=False).head(top_k)
+            post_dedup = len(results)
+
+            if pre_dedup > post_dedup:
+                self.logger.info(f"Removed {pre_dedup - post_dedup} duplicate results")
+
+            self.logger.info(f"Results processed in {time.time() - process_start:.2f}s, returning {len(results)} items")
+            return results.reset_index(drop=True)
+
+            # Add URL resolution for final results only
+            final_results = results.sort_values("similarity", ascending=False).head(top_k)
+
+            # Resolve URLs for top results only
+            # final_results['source'] =
+
+            # Deduplicate based on title only
+            final_results = final_results.drop_duplicates(subset=["title"]).head(top_k)
+
+            return final_results.reset_index(drop=True)
+
+        except Exception as e:
+            self.logger.error(f"Result processing failed: {str(e)}", exc_info=True)
+            return pd.DataFrame(columns=["title", "summary", "similarity"])