Spaces:
Running
Running
Delete app.py
Browse files
app.py
DELETED
@@ -1,157 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
from huggingface_hub import InferenceClient
|
3 |
-
import os
|
4 |
-
import re
|
5 |
-
|
6 |
-
# Optional Hugging Face access token; anonymous access is used when unset.
API_TOKEN = os.getenv("HF_TOKEN", None)
MODEL = "Qwen/Qwen3-32B"

try:
    print(f"Initializing Inference Client for model: {MODEL}")
    # Only forward the token when one was actually provided.
    if API_TOKEN:
        client = InferenceClient(model=MODEL, token=API_TOKEN)
    else:
        client = InferenceClient(model=MODEL)
except Exception as e:
    raise gr.Error(f"Failed to initialize model client for {MODEL}. Error: {e}. Check HF_TOKEN and model availability.")
|
14 |
-
|
15 |
-
|
16 |
-
def extract_files_from_code(raw_code: str) -> list:
    """
    Parse model output and extract files defined in markdown-style
    triple-backtick sections labeled with a filename, e.g.:

        ```index.html
        <html>...</html>
        ```

    Args:
        raw_code: Full (cleaned) text produced by the model.

    Returns:
        A list of dicts with keys "filename", "content", and "language".
        The language is derived from the filename's extension and defaults
        to "plaintext" for unknown extensions. Returns an empty list when
        no fenced sections are present.
    """
    # Fence label may be a filename (letters, digits, ., +, _, -);
    # DOTALL lets the body span multiple lines, non-greedy stops at ```.
    pattern = r"```([a-zA-Z0-9.+_-]+)\n(.*?)```"

    # Extension -> highlight language (idiomatic lookup table instead of
    # an if/elif chain; easy to extend with new extensions).
    ext_languages = {
        ".html": "html",
        ".py": "python",
        ".js": "javascript",
        ".css": "css",
    }

    files = []
    for match in re.finditer(pattern, raw_code, flags=re.DOTALL):
        filename = match.group(1).strip()
        content = match.group(2).strip()
        lang = "plaintext"
        for ext, language in ext_languages.items():
            if filename.endswith(ext):
                lang = language
                break
        files.append({
            "filename": filename,
            "content": content,
            "language": lang,
        })
    return files
|
40 |
-
|
41 |
-
|
42 |
-
def clean_streamed_response(text: str) -> str:
    """
    Strip <think>...</think> blocks and chat role markers
    (e.g. <user>, <|system|>, <|assistant|>) from a streamed response.
    """
    # Drop reasoning blocks first; DOTALL lets them span lines.
    without_think = re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL)
    # Then drop role markers, with or without the surrounding pipes.
    return re.sub(
        r"<\s*\|?\s*(user|system|assistant)\s*\|?\s*>",
        "",
        without_think,
        flags=re.IGNORECASE,
    )
|
51 |
-
|
52 |
-
|
53 |
-
def extract_think_message(text: str) -> str:
    """Return the stripped contents of the first <think>...</think> block, or "" if absent."""
    found = re.search(r"<think>(.*?)</think>", text, flags=re.DOTALL)
    if found is None:
        return ""
    return found.group(1).strip()
|
56 |
-
|
57 |
-
|
58 |
-
def generate_code(prompt, backend_choice, max_tokens, temperature, top_p):
    """
    Stream website-code generation for *prompt* via the chat-completion API.

    Yields (files, think_message, think_box_update) tuples on every token:
      - files: live-updated list of dicts from extract_files_from_code
      - think_message: contents of the model's <think> block so far ("" if none)
      - think_box_update: gr.update toggling the thinking panel's visibility

    Args:
        prompt: Free-form website description from the user.
        backend_choice: One of "Static", "Flask", "Node.js".
        max_tokens, temperature, top_p: Sampling controls forwarded to the API.
    """
    print(f"Streaming generation for: {prompt[:80]}...")

    system_message = (
        "You are an AI that generates website code. You MUST ONLY output the raw code, without any conversational text like 'Here is the code' or explanations before or after the code blocks. "
        "You MUST NOT wrap the code in markdown fences like ```html, ```python, or ```js. "
        "If the user requests 'Static' or the prompt clearly implies only frontend code, generate ONLY the content for the `index.html` file. "
        "If the user requests 'Flask' or 'Node.js' and the prompt requires backend logic, you MUST generate both the `index.html` content AND the corresponding main backend file content (e.g., `app.py` for Flask, `server.js` or `app.js` for Node.js). "
        "When generating multiple files, you MUST wrap them in separate triple-backtick sections labeled with filenames like ```index.html, ```app.py, etc. "
        "The generated website code must be SFW and have minimal errors. Only include comments where user modification is strictly required."
    )

    user_prompt = f"USER_PROMPT = {prompt}\nUSER_BACKEND = {backend_choice}"

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt},
    ]

    full_response = ""
    # Clear any previous output before streaming starts.
    yield [], "", gr.update(visible=False)

    try:
        # BUG FIX: chat_completion was previously called OUTSIDE this try
        # block, so connection/auth errors crashed the handler instead of
        # reaching the error-yield path below.
        stream = client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for message in stream:
            token = message.choices[0].delta.content
            if not token:
                continue
            full_response += token

            # Live-update the <think> panel and the parsed file list.
            think = extract_think_message(full_response)
            cleaned = clean_streamed_response(full_response)
            files = extract_files_from_code(cleaned)

            yield files, think, gr.update(visible=bool(think.strip()))
    except Exception as e:
        print(f"Error: {e}")
        yield [], f"Error: {e}", gr.update(visible=True)
|
107 |
-
|
108 |
-
|
109 |
-
# --- UI ---------------------------------------------------------------------
with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
    gr.Markdown("# ⚡ Website Code Generator")
    gr.Markdown("Enter a description and get live code split into actual files like `index.html`, `app.py`, etc.")

    with gr.Row():
        with gr.Column(scale=2):
            prompt = gr.Textbox(label="Website Prompt", lines=5)
            backend = gr.Radio(["Static", "Flask", "Node.js"], value="Static", label="Backend Type")
            gen_btn = gr.Button("Generate Code", variant="primary")
            with gr.Accordion("Advanced", open=False):
                max_tokens = gr.Slider(512, 4096, step=256, value=2048, label="Max Tokens")
                temperature = gr.Slider(0.1, 1.5, step=0.1, value=0.7, label="Temperature")
                top_p = gr.Slider(0.1, 1.0, step=0.05, value=0.95, label="Top-P")

        with gr.Column(scale=3):
            file_output = gr.Group()
            code_tabs = gr.Tabs()

            dynamic_outputs = []
            for i in range(5):  # Pre-create 5 tabs max
                with code_tabs:
                    code_box = gr.Code(label=f"File {i+1}", language="plaintext", visible=False, lines=25)
                    dynamic_outputs.append(code_box)

            think_box = gr.Textbox(label="Thinking...", visible=False, interactive=False)

    # BUG FIX: the original wiring passed brand-new gr.State() instances to
    # `outputs=` and then DIFFERENT fresh gr.State() instances to `inputs=`
    # of the .then() step, so the generated files and think message never
    # reached display_outputs (and think_box's string value was passed as a
    # boolean). Shared, named State components carry the data between steps.
    files_state = gr.State([])
    think_state = gr.State("")

    def display_outputs(file_list, think_msg):
        """Fan the parsed file list out to the 5 pre-created code tabs."""
        updates = []
        for i in range(5):
            if i < len(file_list):
                f = file_list[i]
                updates.append(gr.update(value=f["content"], label=f["filename"], language=f["language"], visible=True))
            else:
                updates.append(gr.update(visible=False))
        # Visibility is derived from the think message itself.
        return updates + [gr.update(value=think_msg, visible=bool(think_msg.strip()))]

    gen_btn.click(
        fn=generate_code,
        inputs=[prompt, backend, max_tokens, temperature, top_p],
        outputs=[files_state, think_state, think_box],
    ).then(
        fn=display_outputs,
        inputs=[files_state, think_state],
        outputs=dynamic_outputs + [think_box],
    )
|
155 |
-
|
156 |
-
# Script entry point: enable the request queue (required for streaming
# generator handlers like generate_code) and launch the Gradio server.
if __name__ == "__main__":
    demo.queue().launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|