Update app.py
app.py CHANGED
@@ -1,334 +1,157 @@
Before (removed lines are marked with -):

 import gradio as gr
-from huggingface_hub import InferenceClient
 import os
 import re
-import traceback

-# --- Configuration ---
 API_TOKEN = os.getenv("HF_TOKEN", None)
-# MODEL = "Qwen/Qwen3-32B" # This is a very large model, might require specific inference endpoint/hardware
-# Let's try a smaller, generally available model for testing first, e.g., Mixtral
-# You can change this back if you are sure Qwen3-32B is available and configured for your space/token
-# MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"
-# Or uncomment the Qwen model if you are certain it's correctly set up for inference:
 MODEL = "Qwen/Qwen3-32B"
-# I have used Qwen3 because it is quite compatible.
-
-
-# --- Hugging Face Client Initialization ---
-print("--- App Start ---")
-if not API_TOKEN:
-    print("Warning: HF_TOKEN environment variable not set. Using anonymous access.")
-    print("Certain models might require a token for access.")
-else:
-    print(f"HF_TOKEN found (length={len(API_TOKEN)}).")  # Don't print the token itself

 try:
     print(f"Initializing Inference Client for model: {MODEL}")
-
-    client = InferenceClient(model=MODEL, token=API_TOKEN if API_TOKEN else None)
-    print("Inference Client Initialized Successfully.")
-    # Optional: Add a quick test call if feasible, but be mindful of potential costs/rate limits
-    # try:
-    #     client.text_generation("test", max_new_tokens=1)
-    #     print("Test generation successful.")
-    # except Exception as test_e:
-    #     print(f"Warning: Test generation failed. Client might be initialized but model access could be problematic. Error: {test_e}")
-
-except HfHubHTTPError as http_err:
-    # More specific error handling for HTTP errors (like 401 Unauthorized, 403 Forbidden, 404 Not Found)
-    error_message = (
-        f"Failed to initialize model client for {MODEL} due to an HTTP error.\n"
-        f"Status Code: {http_err.response.status_code}\n"
-        f"Error: {http_err}\n"
-        f"Check:\n"
-        f"1. If '{MODEL}' is a valid model ID on Hugging Face Hub.\n"
-        f"2. If the model requires gating or specific permissions.\n"
-        f"3. If your HF_TOKEN is correct and has the necessary permissions (set as a Secret in your Space).\n"
-        f"4. If the default Inference API supports this model or if a dedicated Inference Endpoint is needed."
-    )
-    print(f"ERROR: {error_message}")
-    raise gr.Error(error_message)
 except Exception as e:
-    error_message = (
-        f"An unexpected error occurred while initializing the model client for {MODEL}.\n"
-        f"Error Type: {type(e).__name__}\n"
-        f"Error: {e}\n"
-        f"Traceback:\n{traceback.format_exc()}\n"  # Add traceback
-        f"Check HF_TOKEN, model availability, network connection, and Space resources."
-    )
-    print(f"ERROR: {error_message}")
-    raise gr.Error(error_message)

-# --- Helper Functions ---

-
-
-
-
-
-
-
-
-        # Basic language detection (can be expanded)
-        lang = None
-        if filename.endswith(".py"):
-            lang = "python"
-        elif filename.endswith(".js"):
-            lang = "javascript"
-        elif filename.endswith(".html"):
-            lang = "html"
-        elif filename.endswith(".css"):
-            lang = "css"
-        elif filename.endswith(".json"):
-            lang = "json"
-        elif filename.endswith(".md"):
-            lang = "markdown"
-        elif filename.endswith(".sh") or filename.endswith(".bash"):
-            lang = "bash"
-        elif filename.endswith(".java"):
-            lang = "java"
-        # Add more extensions as needed

         files.append({
             "filename": filename,
-            "
-            "
         })
-    # Add logging to see what's parsed
-    # print(f"Parsed {len(files)} code blocks.")
-    # for i, f in enumerate(files):
-    #     print(f"  Block {i}: filename='{f['filename']}', lang='{f['language']}', code_len={len(f['code'])}")
     return files

-def strip_think_tags(text: str) -> str:
-    return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL).strip()

-def
-
-
-

-# --- System Message ---
-system_message = (
-    "You are a helpful AI assistant specialized in generating website code. "
-    "Generate all the necessary files based on the user's request. "
-    "Output each file within a separate markdown code block formatted exactly like this:\n"
-    "```filename.ext\n"
-    "<code>\n"
-    "```\n"
-    "Do not add any explanatory text outside the code blocks. Ensure the filenames have appropriate extensions. "
-    "If you need to think step-by-step, use <think>...</think> tags. These tags will be hidden from the final user output but help guide your generation process."
-)

-# --- Code Generation Function ---
 def generate_code(prompt, backend_choice, max_tokens, temperature, top_p):
-
-
-
-

-
-    user_prompt = f"USER_PROMPT: {prompt}\nUSER_BACKEND_PREFERENCE: {backend_choice}"

     messages = [
         {"role": "system", "content": system_message},
         {"role": "user", "content": user_prompt}
     ]

-
-
-
-
-
-
-
-    yield [], gr.update(visible=True, value="Generating code...")

-
-
-

     try:
-
-
-
-
-
-            top_p=top_p,
-            # Consider adding stop sequences if the model tends to run on
-            # stop=["```\n\n", "\n\nHuman:", "\n\nUSER:"] # Example stop sequences
-        )
-
-        code_updates = []  # Store the gr.Code components to yield
-
-        for i, message in enumerate(stream):
-            # Check for errors in the stream message (some providers might include error info)
-            if hasattr(message, 'error') and message.error:
-                accumulated_error += f"Error in stream chunk {i}: {message.error}\n"
-                print(f"ERROR in stream chunk {i}: {message.error}")
-                continue  # Skip this chunk if it's an error indicator

-            #
-
-
-                token = message.choices[0].delta.content
-                # Handle potential None token at the end of the stream or in error cases
-                if token is None:
-                    token = ""
-                # print(f"Token {i}: '{token}'")  # DEBUG: print each token
-            except (AttributeError, IndexError, TypeError) as e:
-                # Handle unexpected message structure
-                print(f"Warning: Could not extract token from stream message {i}. Structure: {message}. Error: {e}")
-                token = ""  # Assign empty string to avoid breaking accumulation

-
-
-
-            # Update thinking box periodically (e.g., every 10 tokens or if thoughts change)
-            if i % 10 == 0 or "<think>" in token or "</think>" in token:
-                thoughts = extract_thoughts(full_response)
-                if thoughts != current_thoughts:
-                    current_thoughts = thoughts
-                    # Don't yield code_updates here yet, only update thoughts
-                    yield code_updates, gr.update(value=current_thoughts if current_thoughts else "Thinking...", visible=True)
-
-
-            # Update code blocks less frequently or when a block seems complete
-            # Heuristic: update if the response ends with ```
-            if token.strip().endswith("```") or i % 20 == 0:  # Adjust frequency as needed
-                cleaned_response = strip_think_tags(full_response)
-                parsed_files = parse_code_blocks(cleaned_response)
-
-                # Create gr.Code components for the parsed files
-                # Compare with existing code_updates to avoid redundant updates if content hasn't changed significantly
-                new_code_updates = []
-                changed = False
-                if len(parsed_files) != len(code_updates):
-                    changed = True
-                else:
-                    # Quick check if filenames/code lengths differ significantly
-                    for idx, f in enumerate(parsed_files):
-                        if (idx >= len(code_updates) or
-                                f["filename"] != code_updates[idx].label or
-                                len(f["code"]) != len(code_updates[idx].value)):  # Simple length check
-                            changed = True
-                            break
-
-                if changed or not code_updates:  # Update if changed or first time
-                    code_updates = []
-                    for f in parsed_files:
-                        code_updates.append(
-                            gr.Code(
-                                value=f["code"],
-                                label=f["filename"],
-                                language=f["language"]
-                            )
-                        )
-                    # Yield the list of gr.Code components to the gr.Column
-                    # Also update thoughts (might be slightly out of sync, but acceptable)
-                    yield code_updates, gr.update(value=current_thoughts if current_thoughts else "Thinking...", visible=True)
-
-
-        # --- Final Update after Stream Ends ---
-        print("Stream finished.")
-        if accumulated_error:
-            print(f"Errors occurred during stream:\n{accumulated_error}")
-            # Decide how to show this to the user, e.g., append to thoughts or show separately
-            current_thoughts += f"\n\n**Streaming Errors:**\n{accumulated_error}"
-
-        cleaned_response = strip_think_tags(full_response)
-        final_files = parse_code_blocks(cleaned_response)
-        print(f"Final parsed files: {len(final_files)}")
-
-        final_code_updates = []
-        if not final_files and not accumulated_error:
-            # Handle case where no code blocks were generated
-            final_code_updates.append(gr.Markdown("No code blocks were generated. The model might have responded with text instead, or the format was incorrect."))
-            print("Warning: No code blocks found in the final response.")
-            # Optionally show the raw response for debugging
-            # final_code_updates.append(gr.Code(label="Raw Response", value=cleaned_response, language="text"))
-
-        elif not final_files and accumulated_error:
-            final_code_updates.append(gr.Markdown(f"**Error during generation:**\n{accumulated_error}"))
-
-        else:
-            for f in final_files:
-                final_code_updates.append(
-                    gr.Code(
-                        value=f["code"],
-                        label=f["filename"],
-                        language=f["language"]
-                    )
-                )
-
-        # Yield final code blocks and hide thinking box (or show final thoughts/errors)
-        final_thought_update = gr.update(visible=True if current_thoughts else False, value=current_thoughts)
-        yield final_code_updates, final_thought_update
-
-    except HfHubHTTPError as http_err:
-        # Handle errors during the streaming call itself
-        error_message = (
-            f"**Error during code generation (HTTP Error):**\n"
-            f"Status Code: {http_err.response.status_code}\n"
-            f"Error: {http_err}\n"
-            f"This could be due to rate limits, invalid input, model errors, or token issues.\n"
-            f"Check the Hugging Face Space logs for more details."
-        )
-        print(f"ERROR: {error_message}")
-        print(traceback.format_exc())
-        # Yield error message in the output area
-        yield [gr.Markdown(error_message)], gr.update(visible=False)  # Hide thinking box on error

     except Exception as e:
-
-
-            f"Error Type: {type(e).__name__}\n"
-            f"Error: {e}\n\n"
-            f"**Traceback:**\n```\n{traceback.format_exc()}\n```\n"
-            f"Check the Hugging Face Space logs for more details."
-        )
-        print(f"ERROR: {error_message}")
-        # Yield error message in the output area
-        yield [gr.Markdown(error_message)], gr.update(visible=False)  # Hide thinking box on error


-
-
-    gr.Markdown("
-    gr.Markdown("Describe the website you want. Code files will appear below. Uses `mistralai/Mixtral-8x7B-Instruct-v0.1` by default (check code to change).")  # Update description

     with gr.Row():
         with gr.Column(scale=2):
-
-
-
-
-
-
-
-            top_p_slider = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-P (Nucleus Sampling)")

         with gr.Column(scale=3):
-
-
-
-


-
         fn=generate_code,
-        inputs=[
-
-
-
     )

-# --- Launch ---
 if __name__ == "__main__":
-
-    # Use queue() for handling multiple users and streaming
-    # Set share=False unless you specifically want a public link from local execution
-    # Set debug=True for more detailed Gradio errors locally (remove/set False for production)
-    demo.queue().launch(debug=False, share=False)
-    print("Gradio App Launched.")

After (added lines are marked with +):

 import gradio as gr
+from huggingface_hub import InferenceClient
 import os
 import re

 API_TOKEN = os.getenv("HF_TOKEN", None)
 MODEL = "Qwen/Qwen3-32B"

 try:
     print(f"Initializing Inference Client for model: {MODEL}")
+    client = InferenceClient(model=MODEL, token=API_TOKEN) if API_TOKEN else InferenceClient(model=MODEL)
 except Exception as e:
+    raise gr.Error(f"Failed to initialize model client for {MODEL}. Error: {e}. Check HF_TOKEN and model availability.")


+def extract_files_from_code(raw_code: str) -> list:
+    """
+    Parses the full code block output and extracts files defined using markdown-style triple backticks,
+    e.g., ```index.html ... ```
+    Returns a list of dicts with filename, language, and content.
+    """
+    pattern = r"```([a-zA-Z0-9.+_-]+)\n(.*?)```"
+    matches = re.finditer(pattern, raw_code, flags=re.DOTALL)

+    files = []
+    for match in matches:
+        filename = match.group(1).strip()
+        content = match.group(2).strip()
+        lang = "plaintext"
+        if filename.endswith(".html"): lang = "html"
+        elif filename.endswith(".py"): lang = "python"
+        elif filename.endswith(".js"): lang = "javascript"
+        elif filename.endswith(".css"): lang = "css"
         files.append({
             "filename": filename,
+            "content": content,
+            "language": lang
         })
     return files


+def clean_streamed_response(text: str) -> str:
+    """
+    Remove <think>...</think> and system/assistant/user tokens.
+    """
+    # Remove <think>...</think> blocks
+    text = re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL)
+    # Remove role markers
+    text = re.sub(r"<\s*\|?\s*(user|system|assistant)\s*\|?\s*>", "", text, flags=re.IGNORECASE)
+    return text
+
+
+def extract_think_message(text: str) -> str:
+    match = re.search(r"<think>(.*?)</think>", text, flags=re.DOTALL)
+    return match.group(1).strip() if match else ""


 def generate_code(prompt, backend_choice, max_tokens, temperature, top_p):
+    print(f"Streaming generation for: {prompt[:80]}...")
+
+    system_message = (
+        "You are an AI that generates website code. You MUST ONLY output the raw code, without any conversational text like 'Here is the code' or explanations before or after the code blocks. "
+        "You MUST NOT wrap the code in markdown fences like ```html, ```python, or ```js. "
+        "If the user requests 'Static' or the prompt clearly implies only frontend code, generate ONLY the content for the `index.html` file. "
+        "If the user requests 'Flask' or 'Node.js' and the prompt requires backend logic, you MUST generate both the `index.html` content AND the corresponding main backend file content (e.g., `app.py` for Flask, `server.js` or `app.js` for Node.js). "
+        "When generating multiple files, you MUST wrap them in separate triple-backtick sections labeled with filenames like ```index.html, ```app.py, etc. "
+        "The generated website code must be SFW and have minimal errors. Only include comments where user modification is strictly required."
+    )

+    user_prompt = f"USER_PROMPT = {prompt}\nUSER_BACKEND = {backend_choice}"

     messages = [
         {"role": "system", "content": system_message},
         {"role": "user", "content": user_prompt}
     ]

+    stream = client.chat_completion(
+        messages=messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    )

+    full_response = ""
+    files = []
+    yield [], "", gr.update(visible=False)

     try:
+        for message in stream:
+            token = message.choices[0].delta.content
+            if not token:
+                continue
+            full_response += token

+            # Extract and display <think> message (live)
+            think = extract_think_message(full_response)
+            cleaned = clean_streamed_response(full_response)

+            parsed_files = extract_files_from_code(cleaned)
+            files = parsed_files  # live update

+            yield files, think, gr.update(visible=bool(think.strip()))
     except Exception as e:
+        print(f"Error: {e}")
+        yield [], f"Error: {e}", gr.update(visible=True)


+with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
+    gr.Markdown("# ⚡ Website Code Generator")
+    gr.Markdown("Enter a description and get live code split into actual files like `index.html`, `app.py`, etc.")

     with gr.Row():
         with gr.Column(scale=2):
+            prompt = gr.Textbox(label="Website Prompt", lines=5)
+            backend = gr.Radio(["Static", "Flask", "Node.js"], value="Static", label="Backend Type")
+            gen_btn = gr.Button("Generate Code", variant="primary")
+            with gr.Accordion("Advanced", open=False):
+                max_tokens = gr.Slider(512, 4096, step=256, value=2048, label="Max Tokens")
+                temperature = gr.Slider(0.1, 1.5, step=0.1, value=0.7, label="Temperature")
+                top_p = gr.Slider(0.1, 1.0, step=0.05, value=0.95, label="Top-P")

         with gr.Column(scale=3):
+            file_output = gr.Group()
+            code_tabs = gr.Tabs()
+
+            dynamic_outputs = []
+
+            for i in range(5):  # Pre-create 5 tabs max
+                with code_tabs:
+                    code_box = gr.Code(label=f"File {i+1}", language="plaintext", visible=False, lines=25)
+                    dynamic_outputs.append(code_box)
+
+            think_box = gr.Textbox(label="Thinking...", visible=False, interactive=False)

+    def display_outputs(file_list, think_msg, think_visible):
+        updates = []
+        for i in range(5):
+            if i < len(file_list):
+                f = file_list[i]
+                updates.append(gr.update(value=f["content"], label=f["filename"], language=f["language"], visible=True))
+            else:
+                updates.append(gr.update(visible=False))
+        return updates + [gr.update(value=think_msg, visible=think_visible)]

+    gen_btn.click(
         fn=generate_code,
+        inputs=[prompt, backend, max_tokens, temperature, top_p],
+        outputs=[gr.State(), gr.State(), think_box],
+    ).then(
+        fn=display_outputs,
+        inputs=[gr.State(), gr.State(), think_box],
+        outputs=dynamic_outputs + [think_box]
     )

 if __name__ == "__main__":
+    demo.queue().launch()
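
Note on the event wiring in the new version: `gen_btn.click(...)` streams its values into two freshly constructed `gr.State()` objects, and the `.then(...)` step reads from two other freshly constructed `gr.State()` objects, so the parsed file list and the think text would not actually reach `display_outputs`. A minimal sketch of one way to share the state (an assumption, not part of the commit; it would replace the wiring inside the same `with gr.Blocks(...)` block, and `files_state`/`think_state` are names introduced here):

    # Create the State components once, inside the Blocks context, and reuse
    # the same instances for both the streaming step and the display step.
    files_state = gr.State([])   # list of parsed file dicts yielded by generate_code
    think_state = gr.State("")   # extracted <think> text yielded by generate_code

    gen_btn.click(
        fn=generate_code,
        inputs=[prompt, backend, max_tokens, temperature, top_p],
        outputs=[files_state, think_state, think_box],
    ).then(
        fn=display_outputs,
        inputs=[files_state, think_state, think_box],
        outputs=dynamic_outputs + [think_box],
    )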
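
For reference, a quick standalone check of the triple-backtick pattern used by the new `extract_files_from_code` helper (the sample string below is invented for illustration):

    import re

    # Same pattern as in the commit: a ```filename.ext fence, then the file body.
    pattern = r"```([a-zA-Z0-9.+_-]+)\n(.*?)```"

    sample = "```index.html\n<h1>Hello</h1>\n```\n\n```app.py\nprint('hi')\n```"
    for m in re.finditer(pattern, sample, flags=re.DOTALL):
        print(m.group(1), "->", repr(m.group(2).strip()))
    # index.html -> '<h1>Hello</h1>'
    # app.py -> "print('hi')"

Note that a bare language fence such as ```python (with no filename) would also match this pattern and be treated as a filename, which is presumably why the system prompt insists on filename-labelled fences.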