gigiliu12 committed on
Commit 6758cdb · verified · 1 Parent(s): c43bc47

updated cou

Files changed (1)
  1. app.py +14 -1
app.py CHANGED
@@ -58,7 +58,13 @@ def load_embeddings():
     return ckpt["ids"], ckpt["embeddings"]
 
 ids_list, emb_tensor = load_embeddings()
-
+@st.cache_resource
+def get_st_model():
+    # force CPU so we avoid the meta-tensor copy error
+    return SentenceTransformer(
+        "sentence-transformers/all-MiniLM-L6-v2",
+        device="cpu",
+    )
 # ────────────────────────────────────────────────────────────────────────
 # 3) Streamlit UI
 # ────────────────────────────────────────────────────────────────────────
@@ -100,6 +106,13 @@ if search_clicked:
     with st.spinner("Embedding & searching…"):
         model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
         q_vec = model.encode(sem_query.strip(), convert_to_tensor=True).cpu()
+
+        model = get_st_model()  # ← cached instance
+        q_vec = model.encode(
+            sem_query.strip(),
+            convert_to_tensor=True,
+            device="cpu"
+        ).cpu()
 
         sims = util.cos_sim(q_vec, emb_tensor)[0]
         top_vals, top_idx = torch.topk(sims, k=50)  # get 50 candidates
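For context, the following is a minimal, self-contained sketch (not the full app.py) of how the cached loader from this commit fits into the search flow. The placeholder ids_list and emb_tensor are illustrative assumptions; in the app they come from load_embeddings(), and the widgets stand in for the existing Streamlit UI.

import streamlit as st
import torch
from sentence_transformers import SentenceTransformer, util

@st.cache_resource
def get_st_model():
    # force CPU so we avoid the meta-tensor copy error
    return SentenceTransformer(
        "sentence-transformers/all-MiniLM-L6-v2",
        device="cpu",
    )

# Placeholder corpus; in app.py these are returned by load_embeddings().
ids_list = [f"doc-{i}" for i in range(100)]
emb_tensor = torch.randn(100, 384)  # all-MiniLM-L6-v2 embeddings are 384-dim

sem_query = st.text_input("Semantic query")
if st.button("Search") and sem_query.strip():
    with st.spinner("Embedding & searching…"):
        model = get_st_model()  # cached instance, not reloaded on every rerun
        q_vec = model.encode(sem_query.strip(), convert_to_tensor=True, device="cpu").cpu()
        sims = util.cos_sim(q_vec, emb_tensor)[0]          # cosine similarity vs. corpus
        top_vals, top_idx = torch.topk(sims, k=min(50, sims.numel()))
        st.write([ids_list[i] for i in top_idx.tolist()])

Because st.cache_resource keeps one SentenceTransformer per process, reruns of the script reuse the loaded model instead of re-downloading and re-initialising it on each search.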