# search_utils.py
import numpy as np
import pandas as pd
import faiss
import logging
from pathlib import Path
from sentence_transformers import SentenceTransformer
import streamlit as st
import time
import os
import concurrent.futures
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[logging.StreamHandler()]
)
logger = logging.getLogger("MetadataManager")
class MetadataManager:
def __init__(self):
self.metadata_path = Path("combined.parquet")
self.df = None
self.total_docs = 0
logger.info("Initializing MetadataManager")
self._load_metadata()
logger.info(f"Total documents indexed: {self.total_docs}")
def _load_metadata(self):
"""Load the combined parquet file directly"""
logger.info("Loading metadata from combined.parquet")
try:
# Load the parquet file
self.df = pd.read_parquet(self.metadata_path)
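            # Expected columns (an assumption inferred from downstream usage):
            # title, summary, authors, and a 'source' field holding one or more URLs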
            # 'source' is stored as a semicolon-separated string; keep only entries
            # that look like http(s) URLs, as a list per row
self.df['source'] = self.df['source'].apply(
lambda x: [
url.strip()
for url in str(x).split(';')
if url.strip() and url.startswith('http')
]
)
self.total_docs = len(self.df)
logger.info(f"Successfully loaded {self.total_docs} documents")
except Exception as e:
logger.error(f"Failed to load metadata: {str(e)}")
raise
def get_metadata(self, global_indices):
"""Retrieve metadata for given indices with deduplication by title"""
if isinstance(global_indices, np.ndarray) and global_indices.size == 0:
            return pd.DataFrame(columns=["title", "summary", "source", "authors", "similarity"])
try:
# Directly index the DataFrame
results = self.df.iloc[global_indices].copy()
            # Return one row per requested index so the caller can align distances
            # positionally; deduplication by title happens downstream in
            # SemanticSearch._process_results after similarity is computed
            return results
except Exception as e:
logger.error(f"Metadata retrieval failed: {str(e)}")
            return pd.DataFrame(columns=["title", "summary", "source", "authors", "similarity"])
class SemanticSearch:
def __init__(self):
self.shard_dir = Path("compressed_shards")
self.model = None
self.index_shards = []
self.metadata_mgr = MetadataManager()
        self.shard_sizes = []
        self.total_vectors = 0
# Configure search logger
self.logger = logging.getLogger("SemanticSearch")
self.logger.info("Initializing SemanticSearch")
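    # The leading underscore in `_self` tells st.cache_resource not to hash the
    # instance argument when caching the loaded model (Streamlit skips parameters
    # whose names start with an underscore).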
@st.cache_resource
def load_model(_self):
return SentenceTransformer('all-MiniLM-L6-v2')
def initialize_system(self):
self.logger.info("Loading sentence transformer model")
start_time = time.time()
self.model = self.load_model()
self.logger.info(f"Model loaded in {time.time() - start_time:.2f} seconds")
self.logger.info("Loading FAISS indices")
self._load_faiss_shards()
def _load_faiss_shards(self):
"""Load all FAISS index shards"""
self.logger.info(f"Searching for index files in {self.shard_dir}")
if not self.shard_dir.exists():
self.logger.error(f"Shard directory not found: {self.shard_dir}")
return
index_files = list(self.shard_dir.glob("*.index"))
self.logger.info(f"Found {len(index_files)} index files")
self.shard_sizes = []
self.index_shards = []
for shard_path in sorted(index_files):
try:
self.logger.info(f"Loading index: {shard_path}")
start_time = time.time()
# Log file size
file_size_mb = os.path.getsize(shard_path) / (1024 * 1024)
self.logger.info(f"Index file size: {file_size_mb:.2f} MB")
index = faiss.read_index(str(shard_path))
self.index_shards.append(index)
self.shard_sizes.append(index.ntotal)
self.logger.info(f"Loaded index with {index.ntotal} vectors in {time.time() - start_time:.2f} seconds")
except Exception as e:
self.logger.error(f"Failed to load index {shard_path}: {str(e)}")
self.total_vectors = sum(self.shard_sizes)
self.logger.info(f"Total loaded vectors: {self.total_vectors} across {len(self.index_shards)} shards")
def _global_index(self, shard_idx, local_idx):
"""Convert local index to global index"""
return sum(self.shard_sizes[:shard_idx]) + local_idx
def search(self, query, top_k=5):
"""Search with validation"""
self.logger.info(f"Searching for query: '{query}' (top_k={top_k})")
start_time = time.time()
if not query:
self.logger.warning("Empty query provided")
return pd.DataFrame()
if not self.index_shards:
self.logger.error("No index shards loaded")
return pd.DataFrame()
try:
self.logger.info("Encoding query")
query_embedding = self.model.encode([query], convert_to_numpy=True)
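            # If the shard vectors were L2-normalized at build time (an assumption
            # about the index build step), pass normalize_embeddings=True here as
            # well so distances map cleanly to cosine similarity in _process_results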
self.logger.debug(f"Query encoded to shape {query_embedding.shape}")
except Exception as e:
self.logger.error(f"Query encoding failed: {str(e)}")
return pd.DataFrame()
all_distances = []
all_global_indices = []
        # Delegate per-shard work (empty-shard check, FAISS search, index
        # validation) to _search_shard and merge the results
        self.logger.info(f"Searching across {len(self.index_shards)} shards")
        for shard_idx, index in enumerate(self.index_shards):
            shard_result = self._search_shard(shard_idx, index, query_embedding, top_k)
            if shard_result is None:
                continue
            shard_distances, shard_indices = shard_result
            all_distances.extend(shard_distances)
            all_global_indices.extend(shard_indices)
self.logger.info(f"Search found {len(all_global_indices)} results across all shards")
# Process results
results = self._process_results(
np.array(all_distances),
np.array(all_global_indices),
top_k
)
self.logger.info(f"Search completed in {time.time() - start_time:.2f} seconds with {len(results)} final results")
return results
def _search_shard(self, shard_idx, index, query_embedding, top_k):
"""Search a single FAISS shard for the query embedding with proper error handling."""
if index.ntotal == 0:
self.logger.warning(f"Skipping empty shard {shard_idx}")
return None
try:
shard_start = time.time()
distances, indices = index.search(query_embedding, top_k)
# Filter out invalid indices (-1 is returned by FAISS for insufficient results)
valid_mask = (indices[0] >= 0) & (indices[0] < index.ntotal)
valid_indices = indices[0][valid_mask]
valid_distances = distances[0][valid_mask]
if len(valid_indices) == 0:
self.logger.debug(f"Shard {shard_idx}: No valid results found")
return None
if len(valid_indices) != top_k:
self.logger.debug(f"Shard {shard_idx}: Found {len(valid_indices)} valid results out of {top_k}")
global_indices = [self._global_index(shard_idx, idx) for idx in valid_indices]
            # Defensive filter: drop any pairs whose global index came out negative
valid_global = [(d, i) for d, i in zip(valid_distances, global_indices) if i >= 0]
if not valid_global:
return None
final_distances, final_indices = zip(*valid_global)
self.logger.debug(f"Shard {shard_idx} search completed in {time.time() - shard_start:.3f}s")
return final_distances, final_indices
except Exception as e:
self.logger.error(f"Search failed in shard {shard_idx}: {str(e)}")
return None
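    # Hedged sketch (not part of the original pipeline): the per-shard helper above
    # could be fanned out with the already-imported concurrent.futures. FAISS
    # searches release the GIL, so a thread pool can overlap work across shards.
    # search() still uses the serial loop; wire this in only if profiling shows
    # shard iteration is the bottleneck.
    def _search_shards_parallel(self, query_embedding, top_k, max_workers=4):
        """Search all shards concurrently and merge the (distances, indices) pairs."""
        all_distances, all_global_indices = [], []
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [
                executor.submit(self._search_shard, shard_idx, index, query_embedding, top_k)
                for shard_idx, index in enumerate(self.index_shards)
            ]
            for future in concurrent.futures.as_completed(futures):
                result = future.result()
                if result is None:
                    continue
                distances, indices = result
                all_distances.extend(distances)
                all_global_indices.extend(indices)
        return np.array(all_distances), np.array(all_global_indices)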
def _process_results(self, distances, global_indices, top_k):
"""Process raw search results into formatted DataFrame"""
process_start = time.time()
# Proper numpy array emptiness checks
if global_indices.size == 0 or distances.size == 0:
self.logger.warning("No search results to process")
return pd.DataFrame(columns=["title", "summary", "source", "authors", "similarity"])
try:
# Get metadata for matched indices
self.logger.info(f"Retrieving metadata for {len(global_indices)} indices")
metadata_start = time.time()
results = self.metadata_mgr.get_metadata(global_indices)
self.logger.info(f"Metadata retrieved in {time.time() - metadata_start:.2f}s, got {len(results)} records")
# Empty results check
if len(results) == 0:
self.logger.warning("No metadata found for indices")
return pd.DataFrame(columns=["title", "summary", "source", "authors", "similarity"])
# Ensure distances match results length
if len(results) != len(distances):
self.logger.warning(f"Mismatch between distances ({len(distances)}) and results ({len(results)})")
if len(results) < len(distances):
self.logger.info("Truncating distances array to match results length")
distances = distances[:len(results)]
else:
# Should not happen but handle it anyway
self.logger.error("More results than distances - this shouldn't happen")
distances = np.pad(distances, (0, len(results) - len(distances)), 'constant', constant_values=1.0)
# Calculate similarity scores
self.logger.debug("Calculating similarity scores")
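            # Assumes the shards return (squared) L2 distances over L2-normalized
            # vectors, in which case 1 - d/2 equals cosine similarity; for other
            # index types this is only a monotone relevance score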
results['similarity'] = 1 - (distances / 2)
# Log similarity statistics
if not results.empty:
self.logger.debug(f"Similarity stats: min={results['similarity'].min():.3f}, " +
f"max={results['similarity'].max():.3f}, " +
f"mean={results['similarity'].mean():.3f}")
            # Sort by similarity first so deduplication keeps the best-scoring hit
            # per title, then keep the top_k results
            pre_dedup = len(results)
            results = results.sort_values("similarity", ascending=False).drop_duplicates(subset=["title"]).head(top_k)
post_dedup = len(results)
if pre_dedup > post_dedup:
self.logger.info(f"Removed {pre_dedup - post_dedup} duplicate results")
self.logger.info(f"Results processed in {time.time() - process_start:.2f}s, returning {len(results)} items")
return results.reset_index(drop=True)
except Exception as e:
self.logger.error(f"Result processing failed: {str(e)}", exc_info=True)
            return pd.DataFrame(columns=["title", "summary", "source", "authors", "similarity"])
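# Hedged usage sketch (not part of the original file): drives the classes above
# outside Streamlit. Assumes combined.parquet and compressed_shards/*.index are
# present in the working directory; st.cache_resource typically only emits a
# warning when no Streamlit runtime is active.
if __name__ == "__main__":
    searcher = SemanticSearch()
    searcher.initialize_system()
    hits = searcher.search("transformer models for semantic search", top_k=5)
    if hits.empty:
        print("No results (check that the parquet file and index shards are present).")
    else:
        print(hits[["title", "similarity"]].to_string(index=False))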