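"""WebGen AI: a Gradio app that turns a single prompt into a multi-file website.

A hosted chat model streams the files in a plain "filename, then code" format,
clean_code_blocks() splits that text into per-file blocks, and the UI renders
each file in its own code tab.
"""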
import re

import gradio as gr
from huggingface_hub import InferenceClient

# Hosted chat model used for generation.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def clean_code_blocks(raw_response):
    """
    Extract the code for each file from a structured LLM response.

    Expected format:
        index.html
        <code>
        static/style.css
        <code>
    """
    # Split on lines that look like a filename or path (no forbidden path
    # characters, optional extension, optional sub-path). After the split,
    # odd indices hold the matched names and the following entries hold the code.
    parts = re.split(r"(?:\n|^)([^\n\/\\<>:\"|?*]+(?:\.[a-z]+)?(?:\/[^\n]+)?)\n", raw_response)

    file_blocks = {}
    for i in range(1, len(parts), 2):
        filename = parts[i].strip()
        code = parts[i + 1].strip()
        if filename and code:
            file_blocks[filename] = code
    return file_blocks
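
# Illustrative, commented-out sketch of how clean_code_blocks splits a response;
# the sample strings are invented for demonstration only.
#
#   sample = "index.html\n<h1>Hello</h1>\nstatic/style.css\nh1 { color: red; }"
#   clean_code_blocks(sample)
#   # -> {"index.html": "<h1>Hello</h1>", "static/style.css": "h1 { color: red; }"}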


def generate_code(prompt, system_message, max_tokens, temperature, top_p, backend):
    # Instruction block prepended to the user's own system message; it pins the
    # backend and the filename-then-code output format that clean_code_blocks expects.
    full_system_prompt = f"""
You are a code-generation AI. You MUST generate a full website including an index.html file.
Use ONLY the {backend} backend structure.
Output all code for each file separately using this format:
filename.ext
<code without backticks>
Do NOT add commentary, do NOT use markdown. Output raw code only.
""".strip() + "\n\n" + system_message

    messages = [
        {"role": "system", "content": full_system_prompt},
        {"role": "user", "content": prompt},
    ]

    # Stream the completion and accumulate the generated text.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token

    # Parse the streamed response into a {filename: code} mapping; the render
    # block in the UI below shows each file in its own tab.
    return clean_code_blocks(response)
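
# Illustrative, commented-out sketch of calling generate_code outside the UI;
# the prompt text here is invented for demonstration only.
#
#   files = generate_code(
#       "A landing page for a coffee shop", "You are a helpful assistant.",
#       max_tokens=1024, temperature=0.7, top_p=0.95, backend="Static",
#   )
#   print(list(files))  # e.g. ["index.html", "static/style.css"]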

with gr.Blocks() as demo:
    gr.Markdown("## WebGen AI — One Prompt → Multi-File Website Generator")
    with gr.Row():
        prompt = gr.Textbox(label="Enter Prompt", placeholder="Describe the website you want...")
        backend = gr.Dropdown(["Flask", "Static", "Node.js"], value="Static", label="Select Backend")
    system_msg = gr.Textbox(value="You are a helpful assistant.", label="System Message")
    max_tokens = gr.Slider(1, 2048, value=1024, label="Max Tokens")
    temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
    run_btn = gr.Button("Generate Code")

    # Parsed {filename: code} mapping produced by generate_code.
    files_state = gr.State({})
    run_btn.click(
        generate_code,
        inputs=[prompt, system_msg, max_tokens, temperature, top_p, backend],
        outputs=files_state,
    )

    # Re-render the output area whenever files_state changes (uses Gradio's
    # dynamic @gr.render support): one tab with a code viewer per generated file.
    @gr.render(inputs=files_state)
    def show_files(files):
        with gr.Tabs():
            for filename, code in files.items():
                language = "html" if filename.endswith(".html") else "python" if filename.endswith(".py") else "javascript" if filename.endswith(".js") else "css"
                with gr.TabItem(filename):
                    gr.Code(value=code, language=language, label=filename)


if __name__ == "__main__":
    demo.launch()