HemanM committed
Commit 777a225 · verified · 1 Parent(s): a05fb50

Update inference.py

Files changed (1)
  1. inference.py +9 -8
inference.py CHANGED
@@ -10,8 +10,6 @@ import psutil
 import platform
 import GPUtil
 import openai
-import GPUtil
-

 # Load tokenizer
 tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
@@ -45,12 +43,14 @@ def evo_chat_predict(history, question, options):

 def get_gpt_response(prompt):
     openai.api_key = os.getenv("OPENAI_API_KEY", "sk-...")
+
     try:
-        res = openai.ChatCompletion.create(
+        client = openai.OpenAI()
+        response = client.chat.completions.create(
             model="gpt-3.5-turbo",
             messages=[{"role": "user", "content": prompt}]
         )
-        return res.choices[0].message["content"]
+        return response.choices[0].message.content.strip()
     except Exception as e:
         return f"(GPT Error) {e}"

@@ -94,8 +94,9 @@ def retrain_from_feedback_csv():
     with open(FEEDBACK_LOG, "r", encoding="utf-8") as f:
         reader = csv.DictReader(f)
         for row in reader:
-            if row.get("vote") in ["Evo", "GPT"]:
-                label = 1 if row["vote"] == "Evo" else 0
+            vote = row.get("user_preference") or row.get("vote")
+            if vote in ["Evo", "GPT"]:
+                label = 1 if vote == "Evo" else 0
                 input_text = f"{row['question']} {row['option1']} {row['option2']}"
                 data.append((input_text, label))

@@ -114,17 +115,17 @@ def retrain_from_feedback_csv():
     optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
     for epoch in range(3):
         random.shuffle(data)
-        total_loss = 0.0
         for text, label in data:
             enc = tokenizer(text, padding="max_length", truncation=True, max_length=128, return_tensors="pt").to(device)
             input_ids = enc["input_ids"]
             label_tensor = torch.tensor([label], dtype=torch.float32).to(device)
             logits = model(input_ids)
+            if logits.ndim == 2:
+                logits = logits.squeeze(1)
             loss = F.binary_cross_entropy_with_logits(logits.squeeze(), label_tensor)
             optimizer.zero_grad()
             loss.backward()
             optimizer.step()
-            total_loss += loss.item()
     model.eval()
     return f"✅ Evo retrained on {len(data)} feedback entries."
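For reference, a minimal sketch of a row the retraining loop can consume, assuming FEEDBACK_LOG points at a CSV whose header carries the keys read in the diff (question, option1, option2, and user_preference, with vote as the fallback); the file name and row contents here are illustrative only:

import csv

# Hypothetical feedback row; the column names mirror the keys read in
# retrain_from_feedback_csv. "Evo" maps to label 1, "GPT" to label 0.
fieldnames = ["question", "option1", "option2", "user_preference"]
row = {
    "question": "Which continent is Egypt in?",
    "option1": "Africa",
    "option2": "Asia",
    "user_preference": "Evo",
}

with open("feedback_log.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerow(row)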
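The added ndim check matters because F.binary_cross_entropy_with_logits requires the logits and the target to have identical shapes: a (1, 1) model output paired with the (1,) label_tensor raises a size-mismatch error, so the trailing dimension has to be squeezed away first. A small self-contained illustration, independent of the Evo model:

import torch
import torch.nn.functional as F

logits = torch.randn(1, 1)        # model output shaped (batch=1, 1)
target = torch.tensor([1.0])      # label tensor shaped (1,)

# Drop the trailing dimension so logits and target are both (1,)
if logits.ndim == 2:
    logits = logits.squeeze(1)

loss = F.binary_cross_entropy_with_logits(logits, target)
print(loss.item())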