Spaces:
Running
Running
File size: 1,442 Bytes
a457e2e b530936 d838202 4e96bf5 a457e2e b530936 4e96bf5 a457e2e 854864a a457e2e 854864a a457e2e 4e96bf5 d838202 a457e2e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
# --- Module setup: imports grouped stdlib / third-party / local (PEP 8) ---
import os

import openai
import torch
from transformers import BertTokenizer

from evo_model import EvoTransformerForClassification

# Load the fine-tuned Evo classifier and switch to inference mode
# (disables dropout etc.). Assumes a saved checkpoint in ./trained_model.
model = EvoTransformerForClassification.from_pretrained("trained_model")
model.eval()

# Evo consumes BERT-style wordpiece input, so reuse the stock BERT tokenizer.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

# OpenAI credentials come from the environment; never hard-code the key.
# NOTE(review): returns None if OPENAI_API_KEY is unset — calls will then
# fail inside query_gpt35 and surface as "[GPT-3.5 Error] ..." strings.
openai.api_key = os.getenv("OPENAI_API_KEY")
def query_gpt35(prompt):
    """Ask GPT-3.5-turbo for a short answer to *prompt*.

    Returns the stripped assistant reply on success; on any failure
    (network, auth, rate limit, malformed response) returns an
    error-tagged string instead of raising, so callers never crash.
    """
    try:
        # Single-turn chat request; low temperature keeps answers stable,
        # 50 tokens is plenty for a "which option" verdict.
        reply = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=50,
            temperature=0.3,
        )
        content = reply['choices'][0]['message']['content']
        return content.strip()
    except Exception as e:
        # Deliberate best-effort: surface the error as text, don't raise.
        return f"[GPT-3.5 Error] {e}"
def generate_response(goal, option1, option2):
    """Judge two options for a goal with both Evo and GPT-3.5.

    Returns a dict with "evo_suggestion" (the option text Evo's classifier
    picked) and "gpt_suggestion" (GPT-3.5's free-text verdict, or an
    error-tagged string if the API call failed).
    """
    # Shared natural-language prompt used for the GPT judge.
    prompt = f"Goal: {goal}\nOption 1: {option1}\nOption 2: {option2}\nWhich is better?"

    # Evo scores the two goal+option pairings as a batch of two sequences;
    # argmax over the batch dimension picks the winning pairing.
    encoded = tokenizer(
        [f"{goal} {option1}", f"{goal} {option2}"],
        return_tensors="pt",
        padding=True,
        truncation=True,
    )
    with torch.no_grad():
        logits = model(**encoded)
    winner = torch.argmax(logits, dim=1).item()

    return {
        "evo_suggestion": option1 if winner == 0 else option2,
        "gpt_suggestion": query_gpt35(prompt),
    }
|