import torch
from transformers import AutoTokenizer
from evo_model import EvoTransformer
from rag_utils import extract_text_from_file
from search_utils import web_search_and_format
# Load Evo model and tokenizer
model_path = "evo_hellaswag.pt"
device = "cuda" if torch.cuda.is_available() else "cpu"

model = EvoTransformer()
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device)
model.eval()

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
def get_evo_response(query, context="", file=None, enable_search=True):
    """Answer a yes/no query with Evo, optionally augmented with file and web context."""
    # Build the retrieval-augmented context from the uploaded file and/or a web search
    rag_context = ""
    if file is not None:
        rag_context += extract_text_from_file(file)
    if enable_search:
        search_context = web_search_and_format(query)
        rag_context += "\n" + search_context

    full_context = f"{context}\n{rag_context}".strip()

    # Define hypothetical options (can be made more sophisticated later)
    option1 = "Yes, take action."
    option2 = "No, do not take action."

    # Score each candidate answer as its own Q + context + answer sequence
    inputs = [
        f"Q: {query} Context: {full_context} A: {option1}",
        f"Q: {query} Context: {full_context} A: {option2}",
    ]
    encoded = tokenizer(inputs, padding=True, truncation=True, return_tensors="pt").to(device)

    with torch.no_grad():
        logits = model(encoded["input_ids"]).squeeze(-1)
    probs = torch.softmax(logits, dim=0)  # normalize the two option scores
    best = torch.argmax(probs).item()
    answer = option1 if best == 0 else option2

    reasoning = (
        f"✅ Evo suggests: **{answer}**\n\n"
        f"🧠 Confidence: {probs[best]:.2f}\n"
        f"📖 Context used:\n{full_context[:1000]}..."  # limit to 1000 chars
    )
    return answer, reasoning
def get_gpt_response(query, context=""):
    """Answer the same query with GPT-3.5 for side-by-side comparison."""
    import openai
    import os

    # Read the key from the environment instead of hardcoding it in source
    openai.api_key = os.environ["OPENAI_API_KEY"]

    prompt = f"Q: {query}\nContext: {context}\nA:"
    response = openai.ChatCompletion.create(  # legacy openai<1.0 SDK interface
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.3,
    )
    return response["choices"][0]["message"]["content"].strip()
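

# --- Minimal usage sketch (illustrative addition, not part of the original Space) ---
# Assumes the evo_hellaswag.pt checkpoint is available locally and, for the GPT path,
# that OPENAI_API_KEY is set in the environment. The sample query is hypothetical.
if __name__ == "__main__":
    sample_query = "Should we deploy the new model to production?"

    # Evo path: skip web search so the sketch runs without network access
    evo_answer, evo_reasoning = get_evo_response(sample_query, enable_search=False)
    print(evo_reasoning)

    # GPT path: uses the same query with a small hand-written context string
    gpt_answer = get_gpt_response(sample_query, context="Internal evaluation passed.")
    print("GPT:", gpt_answer)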