Deddy committed on
Commit
c368720
·
verified ·
1 Parent(s): 82eb600

Upload 8 files

Browse files
Files changed (8) hide show
  1. app.py +336 -0
  2. custom_css.py +52 -0
  3. metaprompt_router.py +130 -0
  4. metaprompt_sample.py +57 -0
  5. prompt_refiner.py +64 -0
  6. requirements.txt +7 -0
  7. themes.py +54 -0
  8. variables.py +121 -0
app.py ADDED
@@ -0,0 +1,336 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ PromptSuite AI
3
+ ==============
4
+
5
+ Deskripsi Proyek:
6
+ -----------------
7
+ PromptSuite AI adalah platform rekayasa prompt modern untuk membandingkan, menganalisis,
8
+ dan memperbaiki prompt secara otomatis ataupun manual, berbasis Large Language Model (LLM) open source.
9
+ Platform ini dirancang untuk peneliti, praktisi AI, developer, dan siapapun yang ingin mengeksplorasi
10
+ efek optimasi prompt terhadap kualitas output AI.
11
+
12
+ Fitur:
13
+ ------
14
+ - Perbandingan output prompt original & hasil refine (multi-tab, side-by-side)
15
+ - Refinement otomatis maupun manual, dengan berbagai metaprompt canggih
16
+ - UI responsif dengan status tombol dinamis & reset otomatis
17
+ - Panel JSON untuk output full response (debug/research)
18
+ - Dukungan custom CSS & styling profesional
19
+ - Bisa dijalankan di lokal, server, maupun cloud
20
+
21
+ Teknologi:
22
+ ----------
23
+ - Gradio advanced + custom JS + modular backend PromptRefiner
24
+ - Fleksibel untuk model apapun (tinggal sesuaikan backend PromptRefiner)
25
+ - Siap untuk pengembangan riset atau industri
26
+
27
+ """
28
+
29
+ import gradio as gr
30
+
31
+ from prompt_refiner import PemurniPrompt
32
+ from variables import api_token, models, meta_prompts, explanation_markdown, metaprompt_list, metaprompt_explanations, examples
33
+ from custom_css import custom_css
34
+ from themes import IndonesiaTheme
35
+
36
class PromptSuiteAI:
    """Gradio front-end that wires the PemurniPrompt backend into a
    compare/refine/test prompt-engineering UI.

    The entire interface is built in ``__init__`` and stored on
    ``self.interface``; call :meth:`launch` to serve it. User-facing
    strings are intentionally kept in Indonesian.
    """

    def __init__(self, prompt_refiner: PemurniPrompt, custom_css):
        # Backend used for metaprompt selection, refinement, and model calls.
        self.prompt_refiner = prompt_refiner
        # Default model = last entry of the configured model list.
        # NOTE(review): the `else models[0] if models else None` branch can
        # only run when `models` is empty, in which case it yields None —
        # the whole expression reduces to `models[-1] if models else None`.
        # Confirm intent before simplifying.
        default_model = models[-1] if len(models) >= 1 else models[0] if models else None

        with gr.Blocks(theme=IndonesiaTheme(), css=custom_css) as self.interface:
            # --- CONTAINER 1: title, banner, and platform description ---
            with gr.Column(elem_classes=["container", "title-container"]):
                # Banner image at the top of the page
                gr.HTML("""
                <div style='text-align: center;'>
                    <img src='https://i.ibb.co/Gv3WDQrw/banner-propmptsuite.jpg' alt='Banner' style='width: 100%; height: auto;'/>
                </div>
                """)
                gr.Markdown("# PromptSuite AI")
                gr.Markdown("### Otomatisasi dan Perbandingan Rekayasa Prompt LLM")
                gr.Markdown("Bandingkan, evaluasi, dan optimasi prompt AI Anda secara praktis dan canggih.")
                # Platform description (rendered to users, kept in Indonesian)
                gr.Markdown(
                    """
                    <span style='font-size:1.03em; color:#ccc'>
                    PromptSuite AI adalah platform rekayasa prompt modern untuk membandingkan, menganalisis,
                    dan memperbaiki prompt secara otomatis ataupun manual, berbasis Large Language Model (LLM) open source.
                    Platform ini dirancang untuk peneliti, praktisi AI, developer, dan siapapun yang ingin mengeksplorasi
                    efek optimasi prompt terhadap kualitas output AI.
                    </span>
                    """
                )

            # --- CONTAINER 2: prompt input & examples ---
            with gr.Column(elem_classes=["container", "input-container"]):
                prompt_text = gr.Textbox(label="Tulis prompt Anda (atau kosongkan untuk melihat metaprompt)", lines=5)
                with gr.Accordion("Contoh Prompt", open=False, visible=True):
                    gr.Examples(examples=examples, inputs=[prompt_text])
                automatic_metaprompt_button = gr.Button(
                    "Pilih Otomatis Metode Perbaikan",
                    elem_classes=["button-highlight"]
                )
                # Markdown panel showing the automatic-selection analysis.
                MetaPrompt_analysis = gr.Markdown()

            # --- CONTAINER 3: metaprompt choice & explanation ---
            with gr.Column(elem_classes=["container", "meta-container"]):
                meta_prompt_choice = gr.Radio(
                    choices=metaprompt_list,
                    label="Pilih Metaprompt",
                    value=metaprompt_list[0],
                    elem_classes=["no-background", "radio-group"]
                )
                refine_button = gr.Button(
                    "Perbaiki Prompt",
                    elem_classes=["button-waiting"]
                )
                with gr.Accordion("Penjelasan Metaprompt", open=False, visible=True):
                    gr.Markdown(explanation_markdown)

            # --- CONTAINER 4: analysis & refined prompt ---
            with gr.Column(elem_classes=["container", "analysis-container"]):
                gr.Markdown(" ")
                prompt_evaluation = gr.Markdown()
                gr.Markdown("### Prompt yang Telah Diperbaiki")
                refined_prompt = gr.Textbox(
                    label=" ",
                    interactive=True,
                    show_label=True,
                    show_copy_button=True,
                )
                explanation_of_refinements = gr.Markdown()

            # --- CONTAINER 5: model choice & output tabs ---
            with gr.Column(elem_classes=["container", "model-container"]):
                with gr.Row():
                    apply_model = gr.Dropdown(
                        choices=models,
                        value=default_model,
                        label="Pilih Model",
                        container=False,
                        scale=1,
                        min_width=300
                    )
                    apply_button = gr.Button(
                        "Uji Prompt ke Model",
                        elem_classes=["button-waiting"]
                    )
                gr.Markdown("### Hasil Pada Model Terpilih")
                with gr.Tabs(elem_classes=["tabs"]):
                    # Side-by-side comparison of both model outputs.
                    with gr.TabItem("Perbandingan Output", elem_classes=["tabitem"]):
                        with gr.Row(elem_classes=["output-row"]):
                            with gr.Column(scale=1, elem_classes=["comparison-column"]):
                                gr.Markdown("### Output Prompt Asli")
                                original_output1 = gr.Markdown(
                                    elem_classes=["output-content"],
                                    visible=True
                                )
                            with gr.Column(scale=1, elem_classes=["comparison-column"]):
                                gr.Markdown("### Output Prompt Diperbaiki")
                                refined_output1 = gr.Markdown(
                                    elem_classes=["output-content"],
                                    visible=True
                                )
                    # Single-panel view: original prompt's output only.
                    with gr.TabItem("Output Prompt Asli", elem_classes=["tabitem"]):
                        with gr.Row(elem_classes=["output-row"]):
                            with gr.Column(scale=1, elem_classes=["comparison-column"]):
                                gr.Markdown("### Output Prompt Asli")
                                original_output = gr.Markdown(
                                    elem_classes=["output-content"],
                                    visible=True
                                )
                    # Single-panel view: refined prompt's output only.
                    with gr.TabItem("Output Prompt Diperbaiki", elem_classes=["tabitem"]):
                        with gr.Row(elem_classes=["output-row"]):
                            with gr.Column(scale=1, elem_classes=["comparison-column"]):
                                gr.Markdown("### Output Prompt Diperbaiki")
                                refined_output = gr.Markdown(
                                    elem_classes=["output-content"],
                                    visible=True
                                )
                with gr.Accordion("Respons JSON Lengkap", open=False, visible=True):
                    full_response_json = gr.JSON()

            # ======================= EVENT HANDLERS / JS ==========================

            def automatic_metaprompt(prompt: str):
                """Auto-select the best metaprompt for *prompt*.

                Returns (analysis markdown, recommended metaprompt key);
                the key feeds the ``meta_prompt_choice`` radio directly.
                """
                if not prompt.strip():
                    return "Silakan masukkan prompt untuk dianalisis.", None
                metaprompt_analysis, recommended_key = self.prompt_refiner.automatic_metaprompt(prompt)
                return metaprompt_analysis, recommended_key

            def refine_prompt(prompt: str, meta_prompt_choice: str):
                """Manually refine *prompt* with the chosen metaprompt.

                Returns a 4-tuple matching the output components:
                (initial evaluation, refined prompt, explanation, full JSON).
                """
                if not prompt.strip():
                    return ("Tidak ada prompt.", "", "", {})
                result = self.prompt_refiner.refine_prompt(prompt, meta_prompt_choice)
                return (
                    result[0],  # initial evaluation of the prompt
                    result[1],  # refined prompt
                    result[2],  # explanation of the refinements
                    result[3]   # full JSON response
                )

            def apply_prompts(original_prompt: str, refined_prompt_: str, model: str):
                """Run both the original and refined prompts through *model*.

                Returns four strings feeding, in order: original_output,
                refined_output, original_output1, refined_output1 — i.e.
                both single tabs plus both halves of the comparison tab.
                """
                if not original_prompt or not refined_prompt_:
                    return (
                        "Silakan isi prompt asli dan hasil refine.",
                        "Silakan isi prompt asli dan hasil refine.",
                        "Silakan isi prompt asli dan hasil refine.",
                        "Silakan isi prompt asli dan hasil refine."
                    )
                if not model:
                    return (
                        "Pilih model terlebih dahulu.",
                        "Pilih model terlebih dahulu.",
                        "Pilih model terlebih dahulu.",
                        "Pilih model terlebih dahulu."
                    )
                try:
                    original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
                    refined_output_ = self.prompt_refiner.apply_prompt(refined_prompt_, model)
                except Exception as e:
                    # Surface the error in all four output panels at once.
                    err = f"Terjadi error: {str(e)}"
                    return (err, err, err, err)
                return (
                    str(original_output) if original_output else "Tidak ada output.",
                    str(refined_output_) if refined_output_ else "Tidak ada output.",
                    str(original_output) if original_output else "Tidak ada output.",
                    str(refined_output_) if refined_output_ else "Tidak ada output."
                )

            # --- Click events chained with custom JS (button state machine) ---
            # NOTE(review): the JS below finds buttons by label substring and
            # then indexes allButtons[0..2]; it assumes exactly these three
            # buttons exist and appear in DOM order. Confirm if any button
            # label is ever changed.
            automatic_metaprompt_button.click(
                fn=automatic_metaprompt,
                inputs=[prompt_text],
                outputs=[MetaPrompt_analysis, meta_prompt_choice]
            ).then(
                # Clear downstream panels and advance button highlighting.
                fn=lambda: None,
                inputs=None,
                outputs=None,
                js="""
                () => {
                    document.querySelectorAll('.analysis-container textarea, .analysis-container .markdown-text, .model-container .markdown-text, .comparison-output').forEach(el => {
                        if (el.value !== undefined) {
                            el.value = '';
                        } else {
                            el.textContent = '';
                        }
                    });
                    const allButtons = Array.from(document.querySelectorAll('button')).filter(btn =>
                        btn.textContent.includes('Pilih Otomatis') ||
                        btn.textContent.includes('Perbaiki Prompt') ||
                        btn.textContent.includes('Uji Prompt')
                    );
                    allButtons.forEach(btn => btn.classList.remove('button-highlight'));
                    allButtons[1].classList.add('button-highlight');
                    allButtons[0].classList.add('button-completed');
                    allButtons[2].classList.add('button-waiting');
                }
                """
            )

            refine_button.click(
                fn=refine_prompt,
                inputs=[prompt_text, meta_prompt_choice],
                outputs=[prompt_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
            ).then(
                # Clear the model-output panels and move highlight to step 3.
                fn=lambda: None,
                inputs=None,
                outputs=None,
                js="""
                () => {
                    document.querySelectorAll('.model-container .markdown-text, .comparison-output').forEach(el => {
                        if (el.value !== undefined) {
                            el.value = '';
                        } else {
                            el.textContent = '';
                        }
                    });
                    const allButtons = Array.from(document.querySelectorAll('button')).filter(btn =>
                        btn.textContent.includes('Pilih Otomatis') ||
                        btn.textContent.includes('Perbaiki Prompt') ||
                        btn.textContent.includes('Uji Prompt')
                    );
                    allButtons.forEach(btn => btn.classList.remove('button-highlight'));
                    allButtons[2].classList.add('button-highlight');
                    allButtons[1].classList.add('button-completed');
                    allButtons[2].classList.remove('button-waiting');
                }
                """
            )

            apply_button.click(
                fn=apply_prompts,
                inputs=[prompt_text, refined_prompt, apply_model],
                outputs=[original_output, refined_output, original_output1, refined_output1],
                show_progress=True
            ).then(
                # Mark the workflow complete and force a repaint of the
                # comparison panels (hide/show with a short delay).
                fn=lambda: None,
                inputs=None,
                outputs=None,
                js="""
                () => {
                    const allButtons = Array.from(document.querySelectorAll('button')).filter(btn =>
                        btn.textContent.includes('Pilih Otomatis') ||
                        btn.textContent.includes('Perbaiki Prompt') ||
                        btn.textContent.includes('Uji Prompt')
                    );
                    allButtons.forEach(btn => btn.classList.remove('button-highlight', 'button-waiting'));
                    allButtons[2].classList.add('button-completed');
                    document.querySelectorAll('.comparison-output').forEach(el => {
                        if (el.parentElement) {
                            el.parentElement.style.display = 'none';
                            setTimeout(() => {
                                el.parentElement.style.display = 'block';
                            }, 100);
                        }
                    });
                }
                """
            )

            # Reset the whole workflow whenever the input prompt changes.
            prompt_text.change(
                fn=lambda: None,
                inputs=None,
                outputs=None,
                js="""
                () => {
                    document.querySelectorAll('.analysis-container textarea, .analysis-container .markdown-text, .model-container .markdown-text, .comparison-output').forEach(el => {
                        if (el.value !== undefined) {
                            el.value = '';
                        } else {
                            el.textContent = '';
                        }
                    });
                    const allButtons = Array.from(document.querySelectorAll('button')).filter(btn =>
                        btn.textContent.includes('Pilih Otomatis') ||
                        btn.textContent.includes('Perbaiki Prompt') ||
                        btn.textContent.includes('Uji Prompt')
                    );
                    allButtons.forEach(btn => {
                        btn.classList.remove('button-completed', 'button-highlight', 'button-waiting');
                    });
                    allButtons[0].classList.add('button-highlight');
                    allButtons.slice(1).forEach(btn => btn.classList.add('button-waiting'));
                }
                """
            )

    def launch(self, share=False):
        """Launch the PromptSuite AI interface (blocking Gradio server)."""
        self.interface.launch(share=share)
326
+
327
+
328
if __name__ == '__main__':
    # Wire the dummy refiner backend into the UI and serve it locally.
    refiner_backend = PemurniPrompt(api_token, meta_prompts, metaprompt_explanations)
    PromptSuiteAI(refiner_backend, custom_css).launch(share=False)


# Author: __drat (c)2025
custom_css.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Custom CSS for the Gradio interface; app.py imports this and passes it
# to gr.Blocks(css=custom_css), so this string IS in active use.
# NOTE(review): an earlier comment here claimed the app no longer uses
# custom CSS at all — that contradicted app.py and has been corrected.

custom_css = """
#col-left, #col-mid {
    margin: 0 auto;
    max-width: 400px;
    padding: 10px;
    border-radius: 15px;
    background-color: #f9f9f9;
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
#col-right {
    margin: 0 auto;
    max-width: 400px;
    padding: 10px;
    border-radius: 15px;
    background: linear-gradient(180deg, #B6BBC4, #EEEEEE);
    color: white;
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
#col-bott {
    margin: 0 auto;
    padding: 10px;
    border-radius: 15px;
    background-color: #f9f9f9;
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
#banner {
    width: 100%;
    text-align: center;
    margin-bottom: 20px;
}
#run-button {
    background-color: #ff4b5c;
    color: white;
    font-weight: bold;
    padding: 30px;
    border-radius: 10px;
    cursor: pointer;
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
}
#footer {
    text-align: center;
    margin-top: 20px;
    color: silver;
}
#markdown-silver {
    color: silver; /* Render Markdown text in silver */
}
"""
metaprompt_router.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
def generate_metaprompt_router(methods_dict):
    """Build the router prompt text from a dictionary of metaprompt methods.

    Args:
        methods_dict: Mapping of metaprompt key -> dict with at least
            'name' and 'description'; 'examples' (list of strings) is
            optional and defaults to an empty list.

    Returns:
        str: The complete router prompt — base instructions, a numbered
        listing of every method, and the <json> output-format footer.
    """
    # Start with the base template
    router_template = """
You are an AI Prompt Selection Assistant. Your task is to analyze the user's query and recommend the most appropriate metaprompt from the following list based on the nature of the request. Provide your response in a structured JSON format.

**Metaprompt List:**
"""

    # Append one numbered entry per method (1-based numbering).
    for i, (key, method) in enumerate(methods_dict.items(), 1):
        method_template = f"""
{i}. **{key}**
   - **Name**: {method['name']}
   - **Description**: {method['description']}
   - **Sample**: {', '.join(f'"{example}"' for example in method.get('examples', []))}
"""
        router_template += method_template

    # Add the output format template
    router_template += """
For this given user query:
[Insert initial prompt here]

Analyze the query and provide your recommendation in the following JSON format enclosed in <json> tags:

<json>
{
    "user_query": "The original query from the user",
    "recommended_metaprompt": {
        "key": "Key of the recommended metaprompt",
        "name": "Name of the recommended metaprompt",
        "description": "Brief description of the metaprompt's purpose",
        "explanation": "Detailed explanation of why this metaprompt is the best fit for this specific query, including how it addresses the query's unique requirements and challenges",
        "similar_sample": "If available, a sample use case from the list that's most similar to the user's query",
        "customized_sample": "A new sample specifically tailored to the user's query using this metaprompt approach"
    },
    "alternative_recommendation": {
        "key": "Key of the second-best metaprompt option",
        "name": "Name of the second-best metaprompt option",
        "explanation": "Brief explanation of why this could be an alternative choice and what specific benefits it might offer for this query"
    }
}
</json>
"""

    return router_template
48
+
49
# Generate the router configuration
#metaprompt_router = generate_metaprompt_router(methods_dict) # methods_dict is your full file dictionary


# Hand-maintained router prompt covering the ten built-in metaprompt
# methods. Presumably a pre-generated snapshot of what
# generate_metaprompt_router() would emit for the full methods dict —
# keep the two in sync if methods change (TODO confirm).
metaprompt_router = """
You are an AI Prompt Selection Assistant. Your task is to analyze the user's query and recommend the most appropriate metaprompt from the available methods. Each method has specific strengths and use cases.

**Metaprompt List:**
1. **comprehensive_multistage**
   - **Name**: Comprehensive Multi-Stage Refinement
   - **Description**: Use this method for a thorough, multi-stage refinement process. Ideal for complex prompts requiring in-depth analysis, exploration of alternatives, and synthesis of ideas. Best when time allows for detailed refinement and consideration of various aspects.
   - **Sample**: "Design a comprehensive educational curriculum for teaching artificial intelligence to high school students", "Develop a detailed analysis of climate change impacts on global agriculture over the next 50 years"

2. **structured_roleplaying**
   - **Name**: Structured Role-Playing Enhancement
   - **Description**: Opt for this when you need a structured approach with emphasis on role-playing and advanced techniques. Useful for tasks benefiting from diverse perspectives and complex reasoning.
   - **Sample**: "Create a dialogue between Einstein and a modern AI researcher discussing the future of quantum computing", "Simulate a strategic planning meeting between historical business leaders solving current tech industry challenges"

3. **balanced_scientific**
   - **Name**: Balanced Scientific Structuring
   - **Description**: Choose this for a balance between structure and advanced techniques, with a focus on role-playing. Suitable for scientific or technical prompts.
   - **Sample**: "Explain how CRISPR gene editing technology works and its potential applications in medicine", "Analyze the psychological and neurological factors that influence decision-making in high-pressure situations"

4. **quick_simplified**
   - **Name**: Quick Simplified Refinement
   - **Description**: Use this simplified approach for straightforward prompts or when time is limited. Focuses on essential improvements without complex techniques.
   - **Sample**: "What are the key differences between renewable and non-renewable energy sources?", "Explain the basic principles of machine learning in simple terms"

5. **logical_flow**
   - **Name**: Logical Flow Enhancement
   - **Description**: Choose this method to analyze and improve a prompt's strengths and weaknesses, focusing on information flow. Useful for enhancing the logical structure of prompts.
   - **Sample**: "Break down the process of implementing a sustainable urban transportation system", "Analyze the cause-and-effect relationship between social media use and mental health"

6. **flexible_technique**
   - **Name**: Flexible Technique Integration
   - **Description**: Employ this advanced approach to combine multiple prompt engineering techniques. Ideal for complex tasks requiring both clarity and sophisticated methods.
   - **Sample**: "Create a comprehensive guide for starting a tech startup, including business, technical, and marketing aspects", "Design a multi-phase approach to teaching critical thinking skills in different educational contexts"

7. **autoregressive_reasoning**
   - **Name**: Autoregressive Reasoning Optimization
   - **Description**: Utilize this method for tasks requiring careful reasoning before conclusions. Best for prompts needing detailed output formatting.
   - **Sample**: "Develop a step-by-step analysis of market trends to predict future investment opportunities", "Create a systematic approach to debugging complex software systems"

8. **mathematical_proof**
   - **Name**: Mathematical Proof Structuring
   - **Description**: Specialized approach for mathematical and formal proofs. Use this for tasks requiring a logical, step-by-step prompt engineering process.
   - **Sample**: "Prove the relationship between energy and mass in Einstein's E=mc²", "Demonstrate the mathematical principles behind modern encryption methods"

9. **sequential_contextual**
   - **Name**: Sequential Contextual Enhancement
   - **Description**: Advanced reasoning and proof engineering approach. Focuses on systematic prompt enhancement through structured analysis, enhancement protocols, and validation. Ideal for complex tasks requiring thorough documentation and systematic improvements.
   - **Sample**: "Create a framework for analyzing the long-term societal impacts of artificial intelligence", "Develop a systematic approach to evaluating and improving corporate sustainability practices"

10. **attention_aware**
   - **Name**: Attention-Aware Positioning
   - **Description**: Token-efficient prompt optimization focusing on attention positioning and context management. Best for tasks requiring careful information placement and progressive context building while maintaining efficiency.
   - **Sample**: "Design a progressive learning curriculum that builds complex concepts from fundamental principles", "Create a narrative structure for explaining quantum physics concepts to general audiences"

For this given user query:
[Insert initial prompt here]

Analyze the query and provide your recommendation in the following JSON format enclosed in <json> tags:

<json>
{
    "user_query": "The original query from the user",
    "recommended_metaprompt": {
        "key": "Key of the recommended metaprompt",
        "name": "Name of the recommended metaprompt",
        "description": "Brief description of the metaprompt's purpose",
        "explanation": "Detailed explanation of why this metaprompt is the best fit for this specific query, including how it addresses the query's unique requirements and challenges",
        "similar_sample": "If available, a sample use case from the list that's most similar to the user's query",
        "customized_sample": "A new sample specifically tailored to the user's query using this metaprompt approach"
    },
    "alternative_recommendation": {
        "key": "Key of the second-best metaprompt option",
        "name": "Name of the second-best metaprompt option",
        "explanation": "Brief explanation of why this could be an alternative choice and what specific benefits it might offer for this query"
    }
}
</json>
"""
metaprompt_sample.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# OpenAI-style meta-prompt: given a task description or an existing prompt,
# it instructs the model to produce a refined system prompt and return the
# result inside the <json> envelope defined at the bottom of the string.
# NOTE(review): the stray '**' after "Reasoning Before Conclusions" matches
# the upstream OpenAI meta-prompt text verbatim — left unfixed on purpose.
openai_metaprompt='''
Given a task description or existing prompt, produce a detailed system prompt to guide a language model in completing the task effectively.

Initial Prompt: [Insert initial prompt here]

# Guidelines

- Understand the Task: Grasp the main objective, goals, requirements, constraints, and expected output.
- Minimal Changes: If an existing prompt is provided, improve it only if it's simple. For complex prompts, enhance clarity and add missing elements without altering the original structure.
- Reasoning Before Conclusions**: Encourage reasoning steps before any conclusions are reached. ATTENTION! If the user provides examples where the reasoning happens afterward, REVERSE the order! NEVER START EXAMPLES WITH CONCLUSIONS!
    - Reasoning Order: Call out reasoning portions of the prompt and conclusion parts (specific fields by name). For each, determine the ORDER in which this is done, and whether it needs to be reversed.
    - Conclusion, classifications, or results should ALWAYS appear last.
- Examples: Include high-quality examples if helpful, using placeholders [in brackets] for complex elements.
   - What kinds of examples may need to be included, how many, and whether they are complex enough to benefit from placeholders.
- Clarity and Conciseness: Use clear, specific language. Avoid unnecessary instructions or bland statements.
- Formatting: Use markdown features for readability. DO NOT USE ``` CODE BLOCKS UNLESS SPECIFICALLY REQUESTED.
- Preserve User Content: If the input task or prompt includes extensive guidelines or examples, preserve them entirely, or as closely as possible. If they are vague, consider breaking down into sub-steps. Keep any details, guidelines, examples, variables, or placeholders provided by the user.
- Constants: DO include constants in the prompt, as they are not susceptible to prompt injection. Such as guides, rubrics, and examples.
- Output Format: Explicitly the most appropriate output format, in detail. This should include length and syntax (e.g. short sentence, paragraph, JSON, etc.)
    - For tasks outputting well-defined or structured data (classification, JSON, etc.) bias toward outputting a JSON.
    - JSON should never be wrapped in code blocks (```) unless explicitly requested.

The final prompt you output should adhere to the following structure below. Do not include any additional commentary, only output the completed system prompt. SPECIFICALLY, do not include any additional messages at the start or end of the prompt. (e.g. no "---")

[Concise instruction describing the task - this should be the first line in the prompt, no section header]

[Additional details as needed.]

[Optional sections with headings or bullet points for detailed steps.]

# Steps [optional]

[optional: a detailed breakdown of the steps necessary to accomplish the task]

# Output Format

[Specifically call out how the output should be formatted, be it response length, structure e.g. JSON, markdown, etc]

# Examples [optional]

[Optional: 1-3 well-defined examples with placeholders if necessary. Clearly mark where examples start and end, and what the input and output are. User placeholders as necessary.]
[If the examples are shorter than what a realistic example is expected to be, make a reference with () explaining how real examples should be longer / shorter / different. AND USE PLACEHOLDERS! ]

# Notes [optional]

[optional: edge cases, details, and an area to call or repeat out specific important considerations]

Only provide the output in the following JSON format enclosed in <json> tags:

<json>
{
    "initial_prompt_evaluation": "Your evaluation of the initial prompt with Strengths and Weaknesses in a string on markdown bullet points format",
    "refined_prompt": "Your refined prompt",
    "explanation_of_refinements": "Explanation of the techniques used and improvements made, also include the extract of final prompt where it made. Answer in bullet points if accurate. Output in a single comprehensive string"
}
</json>
'''
prompt_refiner.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# PemurniPrompt analyzes and refines prompts according to the metaprompt
# method selected by the user (currently a dummy/heuristic backend).

class PemurniPrompt:
    """Dummy prompt-refinement backend with simple keyword heuristics."""

    def __init__(self, api_token, meta_prompts, metaprompt_explanations):
        # Credentials plus lookup tables: key -> template / explanation.
        self.api_token = api_token
        self.meta_prompts = meta_prompts
        self.metaprompt_explanations = metaprompt_explanations

    def automatic_metaprompt(self, prompt):
        """Pick the best metaprompt for *prompt* via simple heuristics.

        Returns:
            tuple: (analysis text, suggested metaprompt key or None).
        """
        # Guard: nothing to analyze.
        if not prompt or not prompt.strip():
            return "Prompt kosong. Silakan isi prompt.", None

        # Heuristic 1: data-related prompts -> scientific structuring.
        if "data" in prompt.lower():
            return (
                "Prompt ini berkaitan dengan data, disarankan 'balanced_scientific'.",
                "balanced_scientific",
            )

        # Heuristic 2: long prompts (> 25 words) -> multi-stage refinement.
        if len(prompt.split()) > 25:
            return (
                "Prompt cukup kompleks, disarankan 'comprehensive_multistage'.",
                "comprehensive_multistage",
            )

        # Default: logical-flow enhancement.
        return (
            "Prompt normal, gunakan 'logical_flow' untuk hasil optimal.",
            "logical_flow",
        )

    def refine_prompt(self, prompt, metaprompt_key):
        """Refine *prompt* with the template registered for *metaprompt_key*.

        Returns:
            tuple: (evaluation, refined prompt, explanation, full response dict).
        """
        if not prompt or not prompt.strip():
            return ("Tidak ada prompt yang diberikan.", "", "", {})

        stripped = prompt.strip()
        template = self.meta_prompts.get(metaprompt_key, "")
        # Prepend the selected template when one exists; otherwise just
        # tag the prompt with the (unknown) key.
        if template:
            improved = f"{template.strip()}\n{stripped}"
        else:
            improved = f"[{metaprompt_key}]\n{stripped}"

        evaluation = f"Prompt telah diperbaiki menggunakan metaprompt '{metaprompt_key}'."
        explanation = self.metaprompt_explanations.get(metaprompt_key, "Tidak ada penjelasan tersedia.")
        details = {
            "prompt_awal": prompt,
            "prompt_baru": improved,
            "metaprompt": metaprompt_key,
            "evaluasi": evaluation,
            "penjelasan": explanation,
        }
        return (evaluation, improved, explanation, details)

    def apply_prompt(self, prompt, model):
        """Simulate running *prompt* on *model*; returns the dummy answer.

        Replace the body with a real model/API call when one is available.
        """
        if not prompt or not model:
            return "Prompt atau model kosong."
        # Echo the (truncated) prompt back as a simulated LLM response.
        return f"(MODEL: {model})\nHASIL:\n{prompt[:400]} ...[Output simulasi LLM]"
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ transformers
2
+ torch
3
+ gradio
4
+ httpx==0.24.1
5
+ accelerate>=0.26.0
6
+ tenacity
7
+ pydantic
themes.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from typing import Iterable
3
+ from gradio.themes.base import Base
4
+ from gradio.themes.utils import colors, fonts, sizes
5
+
6
class IndonesiaTheme(Base):
    """Custom Gradio theme: red primary accents over a grey gradient background.

    Extends ``gradio.themes.base.Base`` and overrides individual style tokens
    via ``set()`` after the base constructor runs.
    """

    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.red,
        secondary_hue: colors.Color | str = colors.gray,
        neutral_hue: colors.Color | str = colors.gray,
        spacing_size: sizes.Size | str = sizes.spacing_md,
        radius_size: sizes.Size | str = sizes.radius_md,
        text_size: sizes.Size | str = sizes.text_lg,
        font: fonts.Font
        | str
        | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Quicksand"),
            "ui-sans-serif",
            "sans-serif",
        ),
        font_mono: fonts.Font
        | str
        | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("IBM Plex Mono"),
            "ui-monospace",
            "monospace",
        ),
    ):
        """Build the base theme, then apply the custom style overrides below."""
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            spacing_size=spacing_size,
            radius_size=radius_size,
            text_size=text_size,
            font=font,
            font_mono=font_mono,
        )
        # Override individual design tokens on top of the base theme.
        super().set(
            body_background_fill="linear-gradient(to bottom, #e0e0e0, #7d7d7d)",  # light-grey to dark-grey gradient
            body_background_fill_dark="linear-gradient(to bottom, #7d7d7d, #4a4a4a)",  # darker grey gradient for dark mode
            button_primary_background_fill="linear-gradient(90deg, #d84a4a, #b33030)",  # red to dark-red
            button_primary_background_fill_hover="linear-gradient(90deg, #e85b5b, #cc4b4b)",  # brighter red on hover
            button_primary_text_color="white",
            button_primary_background_fill_dark="linear-gradient(90deg, #b33030, #8f1f1f)",  # dark-red for dark mode
            slider_color="*secondary_300",
            slider_color_dark="*secondary_600",
            block_title_text_weight="600",
            block_border_width="3px",
            block_shadow="*shadow_drop_lg",
            button_large_padding="32px",
        )
variables.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os

# --- Prompt templates come from the PROMPT_TEMPLATES env var (JSON object) ---
template_json = os.getenv('PROMPT_TEMPLATES', '{}')

try:
    # Parse the JSON payload; malformed input must not crash module import.
    data_prompt = json.loads(template_json)
except json.JSONDecodeError:
    # Invalid JSON: fall back to an empty dict so the fallbacks below apply.
    data_prompt = {}

# --- Fallback metaprompt keys used when no templates are configured ---
daftar_metaprompt_fallback = [
    "comprehensive_multistage",
    "structured_roleplaying",
    "balanced_scientific",
    "quick_simplified",
    "logical_flow",
    "flexible_technique",
    "autoregressive_reasoning",
    "mathematical_proof",
    "sequential_contextual",
    "attention_aware"
]

# --- Fallback description for each metaprompt (shown in the UI) ---
penjelasan_metaprompt_fallback = {
    "comprehensive_multistage": "Pendekatan multi-tahap yang komprehensif dan bertingkat.",
    "structured_roleplaying": "Simulasi peran dengan struktur yang jelas.",
    "balanced_scientific": "Keseimbangan antara sains, logika, dan objektivitas.",
    "quick_simplified": "Hasil cepat dan penyederhanaan dalam eksekusi.",
    "logical_flow": "Alur berpikir yang logis dan runtut.",
    "flexible_technique": "Teknik adaptif, fleksibel untuk berbagai kasus.",
    "autoregressive_reasoning": "Penalaran progresif, tahap demi tahap.",
    "mathematical_proof": "Pendekatan matematis dan pembuktian formal.",
    "sequential_contextual": "Proses bertahap dan mempertimbangkan konteks.",
    "attention_aware": "Memaksimalkan fokus dan perhatian pada poin penting."
}

# --- Prefer templates from the env JSON; otherwise use the fallbacks above ---
if data_prompt:
    daftar_metaprompt = list(data_prompt)  # iterating a dict yields its keys
    penjelasan_metaprompt = {
        kunci: data.get("description", "Tidak ada deskripsi")
        for kunci, data in data_prompt.items()
    }
else:
    daftar_metaprompt = daftar_metaprompt_fallback
    penjelasan_metaprompt = penjelasan_metaprompt_fallback

print("Daftar Metaprompt:", daftar_metaprompt)

# --- Markdown bullet list of metaprompt explanations for the UI ---
penjelasan_markdown = "".join(
    f"- **{kunci}**: {isi}\n"
    for kunci, isi in penjelasan_metaprompt.items()
)

# --- Available LLM models ---
daftar_model = [
    "meta-llama/Meta-Llama-3-70B-Instruct",
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "meta-llama/Llama-3.1-70B-Instruct",
    "meta-llama/Llama-3.1-8B-Instruct",
    "meta-llama/Llama-3.2-3B-Instruct",
    "meta-llama/Llama-3.2-1B-Instruct",
    "meta-llama/Llama-2-13b-chat-hf",
    "meta-llama/Llama-2-7b-chat-hf",
    "HuggingFaceH4/zephyr-7b-beta",
    "HuggingFaceH4/zephyr-7b-alpha",
    "Qwen/Qwen2.5-72B-Instruct",
    "Qwen/Qwen2.5-1.5B",
    "microsoft/Phi-3.5-mini-instruct"
]

# --- Collect example prompts from the JSON templates (if any) ---
# Each entry is normalized to a [prompt_text, metaprompt_key] pair.
contoh_prompt = []
for kunci, data in data_prompt.items():
    contoh_template = data.get("examples", [])
    if contoh_template:
        contoh_prompt.extend(
            [contoh[0], kunci] if isinstance(contoh, list) else [contoh, kunci]
            for contoh in contoh_template
        )

# --- API token (required; fail fast at import time when missing) ---
api_token = os.getenv('HF_API_TOKEN')
if not api_token:
    raise ValueError("HF_API_TOKEN tidak ditemukan di environment variable")

# --- Metaprompt templates keyed by name (empty strings when no env JSON) ---
meta_prompts = {
    kunci: data.get("template", "Template tidak tersedia")
    for kunci, data in data_prompt.items()
} if data_prompt else {k: "" for k in daftar_metaprompt}

# --- Default refiner model, overridable via env ---
model_refiner_prompt = os.getenv('prompt_refiner_model', 'meta-llama/Llama-3.1-8B-Instruct')
print("Model refiner prompt yang digunakan:", model_refiner_prompt)

# --- Optional extra prompt variables from the environment (may be None) ---
echo_refiner_prompt = os.getenv('echo_prompt_refiner')
openai_metaprompt = os.getenv('openai_metaprompt')
metaprompt_lanjut = os.getenv('advanced_meta_prompt')

# --- Aliases kept for backward compatibility with app.py ---
metaprompt_list = daftar_metaprompt
explanation_markdown = penjelasan_markdown
models = daftar_model

examples = [
    ["Buatlah ringkasan mendalam mengenai dampak revolusi industri 4.0 terhadap pola kerja masyarakat urban di Indonesia, dengan menyoroti perubahan sosial, ekonomi, serta tantangan sumber daya manusia di era digital.", "comprehensive_multistage"],
    ["Bertindaklah sebagai pakar komunikasi publik dan simulasi tanya jawab antara seorang menteri dan wartawan terkait isu kenaikan harga bahan pokok, lengkap dengan dialog dan argumentasi masing-masing pihak.", "structured_roleplaying"],
    ["Analisis secara kritis data pertumbuhan ekonomi Indonesia dalam lima tahun terakhir, dan jelaskan faktor-faktor utama yang mempengaruhi fluktuasi angka tersebut secara ilmiah dan objektif.", "balanced_scientific"],
    ["Sederhanakan penjelasan tentang blockchain sehingga mudah dipahami oleh pelajar SMA, namun tetap mencakup mekanisme dasar, manfaat, serta potensi risikonya.", "quick_simplified"],
    ["Jelaskan urutan logis proses produksi energi listrik dari sumber energi terbarukan, mulai dari tahap input sumber daya, konversi energi, distribusi, hingga konsumsi akhir oleh masyarakat.", "logical_flow"]
]
metaprompt_explanations = penjelasan_metaprompt