traopia committed on
Commit
78a4f9f
·
1 Parent(s): 9eafc14
Files changed (1) hide show
  1. src/use_llm.py +10 -4
src/use_llm.py CHANGED
@@ -31,14 +31,20 @@ def main_generate(prompt, model=DEFAULT_MODEL, system_prompt="You are a helpful
31
 
32
 
33
 
34
# Use your own token securely via Space secrets or local env.
HF_TOKEN = os.getenv("HF_TOKEN")  # define this in Hugging Face Space Secrets
MODEL_ID = "thenlper/gte-large"  # or another embedding model like BAAI/bge-base-en

client = InferenceClient(model=MODEL_ID, token=HF_TOKEN)


def get_embeddings(texts):
    """Return embedding vectors for one or more texts.

    Parameters
    ----------
    texts : str | list[str]
        A single text, or a list of texts, to embed.

    Returns
    -------
    list
        One embedding vector (sequence of floats) per input text.
    """
    # Accept a bare string for caller convenience.
    if isinstance(texts, str):
        texts = [texts]
    # BUG FIX: InferenceClient has no `embed` method (the original raised
    # AttributeError). `feature_extraction` is the documented API for
    # embedding models such as thenlper/gte-large.
    embeddings = [client.feature_extraction(text) for text in texts]
    return embeddings
 
31
 
32
 
33
 
34
import os

from huggingface_hub import InferenceClient

# Token is read from the environment — set HF_TOKEN in Hugging Face
# Space secrets or your local shell; never hard-code it.
HF_TOKEN = os.getenv("HF_TOKEN")
# Embedding model served through the Inference API.
MODEL_ID = "thenlper/gte-large"  # embedding model

# Shared client instance used by the embedding helpers below.
client = InferenceClient(model=MODEL_ID, token=HF_TOKEN)
41
 
42
def get_embeddings(texts):
    """Return embedding vectors for one or more texts.

    Parameters
    ----------
    texts : str | list[str]
        A single text, or a list of texts, to embed.

    Returns
    -------
    list
        One embedding vector per input text, in input order.
    """
    # Accept a bare string for caller convenience.
    if isinstance(texts, str):
        texts = [texts]
    embeddings = []
    for text in texts:
        # BUG FIX: InferenceClient has no `text_to_vector` method (the
        # original raised AttributeError). `feature_extraction` is the
        # documented call for embedding models; it returns the vector
        # for the given text.
        vector = client.feature_extraction(text)
        embeddings.append(vector)
    return embeddings