# Alternative backend: Hugging Face Inference API (kept for reference).
# from huggingface_hub import InferenceClient
# import os

# # Use a Hugging Face inference endpoint such as "google/gemma-1.1-7b-it".
# # You must have access to this model (either public or via a token).

# HUGGINGFACE_API_TOKEN = os.getenv("HF_TOKEN")  # Add this in your HF Space's secret settings
# DEFAULT_MODEL = "google/gemma-1.1-7b-it"

# client = InferenceClient(DEFAULT_MODEL, token=HUGGINGFACE_API_TOKEN)

# def send_chat_prompt(prompt: str, model: str, system_prompt: str) -> str:
#     # Gemma's chat template uses <start_of_turn>/<end_of_turn> (no pipes) and
#     # has no system role, so the system prompt is prepended to the user turn.
#     full_prompt = f"<start_of_turn>user\n{system_prompt}\n\n{prompt}<end_of_turn>\n" \
#                   f"<start_of_turn>model\n"

#     response = client.text_generation(
#         prompt=full_prompt,
#         max_new_tokens=500,
#         temperature=0.5,
#         stop_sequences=["<end_of_turn>"]
#     )

#     return response.strip()

# def main_generate(prompt, model=DEFAULT_MODEL, system_prompt="You are a helpful assistant that generates SPARQL queries."):
#     response = send_chat_prompt(prompt, model, system_prompt)
#     # Strip markdown code fences so only the raw query remains.
#     response = response.replace('```', '').replace('json', '').strip()
#     return response






# Alternative embedding backend: SentenceTransformer (kept for reference).
# from sentence_transformers import SentenceTransformer

# model = SentenceTransformer("thenlper/gte-large")  # downloaded from Hugging Face

# def get_embeddings(texts):
#     if isinstance(texts, str):
#         texts = [texts]
#     embeddings = model.encode(texts, convert_to_numpy=True)
#     return embeddings






import os

import ollama
import openai


def get_embeddings(texts):
    # Embed one or more texts with a local Ollama embedding model;
    # returns a list of vectors, one per input text.
    response = ollama.embed(model="mxbai-embed-large", input=texts)
    return response["embeddings"]
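
# Example usage (a sketch, assuming a local Ollama server with the
# "mxbai-embed-large" model pulled); the printed shape is illustrative:
#
#   vecs = get_embeddings(["What is SPARQL?", "List all graphs."])
#   print(len(vecs), len(vecs[0]))  # 2 vectors, ~1024 dims each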


openai_api_key = "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"
#generate
def send_chat_prompt(prompt, model, system_prompt ):
    client = openai.OpenAI(
            base_url="http://localhost:11434/v1" if not "gpt" in model else None,
            api_key= "ollama" if not "gpt" in model else openai_api_key)
    resp = client.chat.completions.create(
        model=model,
        temperature = 0.5 ,
        messages=[
                {"role": "system", "content": system_prompt},  
                {"role": "user", "content": prompt}])
    response = resp.choices[0].message.content
    return response

def main_generate(prompt, model, system_prompt):
    response = send_chat_prompt(prompt, model, system_prompt)
    # Strip markdown code fences (including a "json" language tag) from the reply.
    response = response.replace('```', '').replace('json', '')
    return response
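

# Minimal smoke test (a sketch): the model name below is an assumption; use
# any model served by your local Ollama instance, or a "gpt" model with
# OPENAI_API_KEY set in the environment.
if __name__ == "__main__":
    print(main_generate(
        "Write a SPARQL query that returns 10 triples.",
        model="llama3",
        system_prompt="You are a helpful assistant that generates SPARQL queries."))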