TuringsSolutions committed (verified)
Commit c136c7c · 1 Parent(s): 371c779

Create app.py

Files changed (1)
  1. app.py +306 -0
app.py ADDED
@@ -0,0 +1,306 @@
import gradio as gr
import os
import json
import random
import time
from huggingface_hub import InferenceClient
import google.generativeai as genai

# --- Helper Functions ---

def log_message(message, type='info'):
    """Helper to format log messages with a timestamp."""
    timestamp = time.strftime("%H:%M:%S")
    # Simple prefixes for log clarity in the textbox
    if type == 'success':
        return f"[{timestamp}] SUCCESS: {message}"
    if type == 'fail':
        return f"[{timestamp}] FAIL: {message}"
    if type == 'best':
        return f"[{timestamp}] *** {message} ***"
    return f"[{timestamp}] {message}"

# --- Core GEPA Functions (Python Implementation) ---

def run_huggingface_rollout(client, model_id, prompt, input_text):
    """
    Calls the Hugging Face Inference API for the target model (e.g., Gemma).
    This function performs a "rollout" for a given prompt and input.
    """
    full_prompt = f"<start_of_turn>user\n{prompt}\n\nText: \"{input_text}\"<end_of_turn>\n<start_of_turn>model\n"
    try:
        response = client.text_generation(
            model=model_id,
            prompt=full_prompt,
            max_new_tokens=100,
            do_sample=True,
            temperature=0.7,
            top_p=0.95
        )
        return response
    except Exception as e:
        # Provide a more specific error message for common issues
        err_str = str(e).lower()
        if "authorization" in err_str:
            raise gr.Error(f"Hugging Face API Error: Authorization failed. Please ensure your HF Token is correct and that you have accepted the terms for the model '{model_id}' on its Hugging Face page.")
        if "not found" in err_str:
            raise gr.Error(f"Hugging Face API Error: Model '{model_id}' not found or requires a Pro subscription.")
        raise gr.Error(f"Hugging Face API Error: {str(e)}")

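# --- Optional rollout variant (illustrative sketch, not wired in above) ---
# Some chat-tuned models are served only through the conversational endpoint,
# in which case the raw text_generation call can fail. This sketch assumes the
# target model is reachable via InferenceClient.chat_completion; swap it in for
# run_huggingface_rollout if needed. The function name is ours, not part of GEPA.
def run_huggingface_chat_rollout(client, model_id, prompt, input_text):
    """Rollout variant using the chat-completion endpoint instead of a raw prompt template."""
    response = client.chat_completion(
        model=model_id,
        messages=[{"role": "user", "content": f'{prompt}\n\nText: "{input_text}"'}],
        max_tokens=100,
        temperature=0.7,
    )
    return response.choices[0].message.content
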
def evaluation_and_feedback_function(output, task):
    """
    The evaluation function (μ_f in the paper).
    This function scores the model's output and provides textual feedback.
    IMPORTANT: This is the most critical part to customize for a specific task.
    """
    # --- CUSTOMIZE THIS FUNCTION ---
    # This example checks for keyword presence. For a real task, you might use
    # regex, semantic similarity, code compilation, etc.
    feedback = ""
    found_keywords = 0
    expected_keywords = task.get("expected_keywords", [])

    if not expected_keywords:
        return {
            "score": 0.0,
            "feedback": "No evaluation criteria (expected_keywords) found in training data for this task."
        }

    for keyword in expected_keywords:
        if keyword.lower() in output.lower():
            found_keywords += 1
            feedback += f"SUCCESS: Output correctly contained the keyword '{keyword}'.\n"
        else:
            feedback += f"FAILURE: Output was missing the required keyword '{keyword}'.\n"

    score = found_keywords / len(expected_keywords)
    feedback += f"Final Score for this task: {score:.2f}"
    return {"score": score, "feedback": feedback}
    # --- END CUSTOMIZATION ---

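# --- Example customization (illustrative sketch; not used by the app) ---
# A stricter evaluator could require whole-word matches instead of substrings,
# so that "Paris" does not match "Parisian". It reads the same
# "expected_keywords" field as the evaluator above; everything else here is an
# assumption about how you might score your own task.
import re

def whole_word_evaluation(output, task):
    """Evaluator variant that only counts whole-word keyword matches."""
    keywords = task.get("expected_keywords", [])
    if not keywords:
        return {"score": 0.0, "feedback": "No expected_keywords provided for this task."}
    hits = sum(
        bool(re.search(rf"\b{re.escape(k)}\b", output, re.IGNORECASE))
        for k in keywords
    )
    score = hits / len(keywords)
    return {"score": score, "feedback": f"Matched {hits}/{len(keywords)} keywords (score {score:.2f})."}
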
def reflect_and_propose_new_prompt(gemini_model, current_prompt, examples):
    """
    Performs the Reflective Prompt Mutation step using a powerful LLM (Gemini).
    """
    examples_text = '---'.join(
        f'Task Input: "{e["input"]}"\nGenerated Output: "{e["output"]}"\nFeedback:\n{e["feedback"]}\n\n'
        for e in examples
    )

    reflection_prompt = f"""You are an expert prompt engineer. Your task is to refine a prompt to improve its performance based on feedback from previous attempts.

Here is the current prompt that needs improvement:
--- CURRENT PROMPT ---
{current_prompt}
--------------------

Here are examples of how the prompt performed on a few tasks, along with feedback on what went wrong or right:
--- EXAMPLES & FEEDBACK ---
{examples_text}
-------------------------

Based on this analysis, your task is to write a new, improved prompt. The new prompt should be a complete set of instructions that directly addresses the failures and incorporates the successful strategies observed in the feedback. Do not just give suggestions; provide the full, ready-to-use prompt.
Your response should ONLY contain the new prompt text, and nothing else."""

    try:
        response = gemini_model.generate_content(reflection_prompt)
        return response.text.strip()
    except Exception as e:
        raise gr.Error(f"Gemini API Error: {str(e)}. Check your Gemini API Key.")


def select_candidate_for_mutation(candidate_pool, num_tasks):
    """
    Selects the next candidate to mutate based on the Pareto-based strategy.
    """
    if len(candidate_pool) == 1:
        return candidate_pool[0]

    best_scores_per_task = [-1.0] * num_tasks
    for candidate in candidate_pool:
        for i in range(num_tasks):
            if candidate["scores"][i] > best_scores_per_task[i]:
                best_scores_per_task[i] = candidate["scores"][i]

    pareto_front_ids = set()
    for i in range(num_tasks):
        for candidate in candidate_pool:
            if abs(candidate["scores"][i] - best_scores_per_task[i]) < 1e-6:
                pareto_front_ids.add(candidate["id"])

    if not pareto_front_ids:
        return max(candidate_pool, key=lambda c: c["avg_score"])

    selected_id = random.choice(list(pareto_front_ids))
    return next(c for c in candidate_pool if c["id"] == selected_id)

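# Worked example of the selection rule above (scores assumed for illustration):
# given per-task scores A=[1.0, 0.0], B=[0.0, 1.0] and C=[0.5, 0.5], A holds the
# best score on task 1 and B on task 2, so only {A, B} are eligible for mutation
# and C is never sampled, even though all three tie on average score. Keeping
# per-task specialists alive like this is the "Pareto" part of GEPA.
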
# --- Main Gradio Application Logic ---

def run_gepa_optimization(hf_token_from_input, gemini_key_from_input, model_id, seed_prompt, training_data_str, budget):
    """
    The main function that orchestrates the GEPA optimization process.
    This is a generator function that yields updates to the Gradio UI.
    """
    # --- Get API Keys from Secrets or Inputs ---
    hf_token = os.environ.get("HF_TOKEN") or hf_token_from_input
    gemini_key = os.environ.get("GEMINI_API_KEY") or gemini_key_from_input

    # --- Validate Inputs ---
    if not hf_token:
        raise gr.Error("Hugging Face API Token is required. Add it as a Space Secret named HF_TOKEN or enter it in the textbox.")
    if not gemini_key:
        raise gr.Error("Google Gemini API Key is required. Add it as a Space Secret named GEMINI_API_KEY or enter it in the textbox.")
    try:
        training_data = json.loads(training_data_str)
        if not isinstance(training_data, list) or not all(isinstance(item, dict) for item in training_data):
            raise ValueError()
    except (json.JSONDecodeError, ValueError):
        raise gr.Error("Training Data is not valid JSON. It should be a list of objects.")

    # --- Initialization ---
    log_history = []
    hf_client = InferenceClient(token=hf_token)
    genai.configure(api_key=gemini_key)
    gemini_model = genai.GenerativeModel('gemini-1.5-flash')

    rollout_count = 0
    candidate_pool = []
    best_candidate = {
        "prompt": "Initializing...",
        "avg_score": 0.0
    }

    def get_current_state():
        return "\n".join(log_history), best_candidate["prompt"], f"{best_candidate['avg_score']:.2f}"

    # --- Initial Evaluation of Seed Prompt ---
    log_history.append(log_message("Initializing with seed prompt..."))
    yield get_current_state()

    initial_candidate = {"id": 0, "prompt": seed_prompt, "parentId": None, "scores": [0.0] * len(training_data), "avg_score": 0.0}
    total_score = 0.0
    for i, task in enumerate(training_data):
        log_history.append(log_message(f" - Evaluating seed on task {i+1}..."))
        yield get_current_state()
        output = run_huggingface_rollout(hf_client, model_id, initial_candidate["prompt"], task["input"])
        eval_result = evaluation_and_feedback_function(output, task)
        initial_candidate["scores"][i] = eval_result["score"]
        total_score += eval_result["score"]
        rollout_count += 1

    initial_candidate["avg_score"] = total_score / len(training_data) if training_data else 0.0
    candidate_pool.append(initial_candidate)
    best_candidate = initial_candidate

    log_history.append(log_message(f"Seed prompt initial score: {initial_candidate['avg_score']:.2f}", 'best'))
    yield get_current_state()

    # --- Main Optimization Loop ---
    while rollout_count < budget:
        log_history.append(log_message(f"--- Iteration Start (Rollouts: {rollout_count}/{budget}) ---"))
        yield get_current_state()

        parent_candidate = select_candidate_for_mutation(candidate_pool, len(training_data))
        log_history.append(log_message(f"Selected candidate #{parent_candidate['id']} (Score: {parent_candidate['avg_score']:.2f}) for mutation."))
        yield get_current_state()

        task_index = random.randint(0, len(training_data) - 1)
        reflection_task = training_data[task_index]
        log_history.append(log_message(f"Performing reflective mutation using task {task_index + 1}..."))
        yield get_current_state()

        rollout_output = run_huggingface_rollout(hf_client, model_id, parent_candidate["prompt"], reflection_task["input"])
        rollout_count += 1
        eval_result = evaluation_and_feedback_function(rollout_output, reflection_task)

        new_prompt = reflect_and_propose_new_prompt(gemini_model, parent_candidate["prompt"], [{
            "input": reflection_task["input"],
            "output": rollout_output,
            "feedback": eval_result["feedback"]
        }])

        new_candidate = {"id": len(candidate_pool), "prompt": new_prompt, "parentId": parent_candidate["id"], "scores": [0.0] * len(training_data), "avg_score": 0.0}
        log_history.append(log_message(f"Generated new candidate prompt #{new_candidate['id']}."))
        yield get_current_state()

        new_total_score = 0.0
        for i, task in enumerate(training_data):
            if rollout_count >= budget:
                break
            output = run_huggingface_rollout(hf_client, model_id, new_candidate["prompt"], task["input"])
            eval_result = evaluation_and_feedback_function(output, task)
            new_candidate["scores"][i] = eval_result["score"]
            new_total_score += eval_result["score"]
            rollout_count += 1
        new_candidate["avg_score"] = new_total_score / len(training_data) if training_data else 0.0

        if new_candidate["avg_score"] > parent_candidate["avg_score"]:
            log_history.append(log_message(f"New candidate #{new_candidate['id']} improved! Score: {new_candidate['avg_score']:.2f} > {parent_candidate['avg_score']:.2f}", 'success'))
            candidate_pool.append(new_candidate)
            if new_candidate["avg_score"] > best_candidate["avg_score"]:
                best_candidate = new_candidate
                log_history.append(log_message("NEW BEST PROMPT FOUND!", 'best'))
                yield get_current_state()
        else:
            log_history.append(log_message(f"New candidate #{new_candidate['id']} did not improve. Score: {new_candidate['avg_score']:.2f}. Discarding.", 'fail'))

        yield get_current_state()

    log_history.append(log_message("Optimization budget exhausted. Finished.", 'best'))
    yield get_current_state()

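# --- Headless usage sketch (illustrative; the token arguments and data below are placeholders) ---
# run_gepa_optimization is a generator of (log, best_prompt, best_score) tuples,
# so a quick smoke test outside the UI just drains it. Not called anywhere by default.
def smoke_test(hf_token, gemini_key):
    data = json.dumps([
        {"input": "Paris is the capital and largest city of France.", "expected_keywords": ["Paris", "France"]}
    ])
    seed = "Summarize the following text in one sentence."
    for _log, _prompt, score in run_gepa_optimization(hf_token, gemini_key, "google/gemma-2b-it", seed, data, 5):
        print(score)
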
# --- Gradio Interface Definition ---
with gr.Blocks(theme=gr.themes.Soft(), title="GEPA Prompt Optimizer") as demo:
    gr.Markdown("""
    # GEPA Prompt Optimizer for Hugging Face Models
    This Space implements the **GEPA (Genetic-Pareto)** framework to automatically optimize prompts for a target model (like Gemma) hosted on Hugging Face.
    It uses a powerful LLM (Gemini) for the "reflection" step to propose high-quality prompt improvements.
    """)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## 1. Configuration")
            hf_token_input = gr.Textbox(label="Hugging Face API Token (Optional)", type="password", info="Leave blank if HF_TOKEN is set as a Space Secret.")
            gemini_key_input = gr.Textbox(label="Google Gemini API Key (Optional)", type="password", info="Leave blank if GEMINI_API_KEY is set as a Space Secret.")
            model_id_input = gr.Textbox(label="Target Model ID", value="google/gemma-2b-it", info="The Hugging Face model to optimize for.")
            seed_prompt_input = gr.Textbox(label="Initial Seed Prompt", lines=5, value="You are a helpful assistant that summarizes text. Given the following text, provide a one-sentence summary.")
            training_data_input = gr.Code(
                label="Training Data (JSON)",
                language="json",
                lines=10,
                value="""[
  {
    "input": "The Eiffel Tower is a wrought-iron lattice tower on the Champ de Mars in Paris, France. It is named after the engineer Gustave Eiffel, whose company designed and built the tower.",
    "expected_keywords": ["Eiffel Tower", "Paris"]
  },
  {
    "input": "The Great Wall of China is a series of fortifications that were built across the historical northern borders of ancient Chinese states and Imperial China as protection against various nomadic groups from the Eurasian Steppe.",
    "expected_keywords": ["Great Wall", "China", "fortifications"]
  },
  {
    "input": "The Colosseum is an oval amphitheatre in the centre of the city of Rome, Italy, just east of the Roman Forum. It is the largest ancient amphitheatre ever built, and is still the largest standing amphitheatre in the world today, despite its age.",
    "expected_keywords": ["Colosseum", "Rome", "amphitheatre"]
  }
]"""
            )
            budget_input = gr.Slider(label="Optimization Budget (Total Rollouts)", minimum=5, maximum=100, value=10, step=1)
            start_button = gr.Button("Start Optimization", variant="primary")

        with gr.Column(scale=1):
            gr.Markdown("## 2. Results")
            best_prompt_output = gr.Textbox(label="Best Prompt Found", lines=8, interactive=False)
            best_score_output = gr.Textbox(label="Best Score", interactive=False)
            log_output = gr.Textbox(label="Optimization Log", lines=20, interactive=False, autoscroll=True)

    start_button.click(
        fn=run_gepa_optimization,
        inputs=[hf_token_input, gemini_key_input, model_id_input, seed_prompt_input, training_data_input, budget_input],
        outputs=[log_output, best_prompt_output, best_score_output]
    )

if __name__ == "__main__":
    demo.launch()