HemanM committed on
Commit
62adefb
·
verified ·
1 Parent(s): 63d9bd3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -11
app.py CHANGED
@@ -16,14 +16,11 @@ import time
16
  # ✅ Secure OpenAI API key
17
  openai.api_key = os.getenv("OPENAI_API_KEY")
18
 
19
 - # ✅ Use CPU or GPU
20
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
21
 
22
 - # ✅ Load PIQA from Hugging Face JSON (safe for Spaces)
23
- dataset = load_dataset("json", data_files={
24
- "train": "https://huggingface.co/datasets/AI-Sweden/piqa-downsampled/resolve/main/train.json",
25
- "validation": "https://huggingface.co/datasets/AI-Sweden/piqa-downsampled/resolve/main/validation.json"
26
- })
27
  tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
28
 
29
  def tokenize_choices(example):
@@ -69,7 +66,7 @@ def gpt35_answer(prompt):
69
  except Exception as e:
70
  return f"[Error: {e}]"
71
 
72
 - # ✅ Training + Evaluation function
73
  def train_and_demo(few_shot_size):
74
  start_time = time.time()
75
  model = EvoTransformer().to(device)
@@ -125,7 +122,7 @@ def train_and_demo(few_shot_size):
125
  if early_stop >= patience:
126
  break
127
 
128
 - # ✅ Accuracy plot
129
  fig, ax = plt.subplots()
130
  ax.plot(accs, marker='o')
131
  ax.set_title(f"Validation Accuracy ({few_shot_size} examples)")
@@ -136,7 +133,7 @@ def train_and_demo(few_shot_size):
136
  buf.seek(0)
137
  img = Image.open(buf)
138
 
139
 - # ✅ Show comparison examples
140
  output = ""
141
  for i in range(2):
142
  ex = dataset["validation"][i]
@@ -170,7 +167,7 @@ EvoTransformer v2.1 Configuration:
170
 
171
  return img, f"Best Accuracy: {best_val:.4f}", output.strip() + "\n\n" + architecture_info.strip()
172
 
173
 - # ✅ Gradio Interface
174
  gr.Interface(
175
  fn=train_and_demo,
176
  inputs=gr.Slider(10, 500, step=10, value=50, label="Number of Training Examples"),
@@ -180,5 +177,5 @@ gr.Interface(
180
  gr.Textbox(label="Evo vs GPT-3.5 Output")
181
  ],
182
  title="🧬 EvoTransformer v2.1 Benchmark",
183
- description="Train EvoTransformer on PIQA and compare its predictions to GPT-3.5."
184
  ).launch()
 
16
  # ✅ Secure OpenAI API key
17
  openai.api_key = os.getenv("OPENAI_API_KEY")
18
 
19
 + # ✅ Use GPU if available
20
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
21
 
22
 + # ✅ Load official PIQA dataset with remote code trust enabled
23
+ dataset = load_dataset("piqa", trust_remote_code=True)
 
 
 
24
  tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
25
 
26
  def tokenize_choices(example):
 
66
  except Exception as e:
67
  return f"[Error: {e}]"
68
 
69
 + # ✅ Training and evaluation function
70
  def train_and_demo(few_shot_size):
71
  start_time = time.time()
72
  model = EvoTransformer().to(device)
 
122
  if early_stop >= patience:
123
  break
124
 
125
 + # ✅ Accuracy Plot
126
  fig, ax = plt.subplots()
127
  ax.plot(accs, marker='o')
128
  ax.set_title(f"Validation Accuracy ({few_shot_size} examples)")
 
133
  buf.seek(0)
134
  img = Image.open(buf)
135
 
136
 + # ✅ GPT vs Evo Predictions
137
  output = ""
138
  for i in range(2):
139
  ex = dataset["validation"][i]
 
167
 
168
  return img, f"Best Accuracy: {best_val:.4f}", output.strip() + "\n\n" + architecture_info.strip()
169
 
170
 + # ✅ Gradio interface
171
  gr.Interface(
172
  fn=train_and_demo,
173
  inputs=gr.Slider(10, 500, step=10, value=50, label="Number of Training Examples"),
 
177
  gr.Textbox(label="Evo vs GPT-3.5 Output")
178
  ],
179
  title="🧬 EvoTransformer v2.1 Benchmark",
180
+ description="Train EvoTransformer live on PIQA and compare with GPT-3.5."
181
  ).launch()