MINEOGO committed
Commit 5f4f3c1 · verified · 1 Parent(s): 119b0cf

Update app.py

Files changed (1)
app.py +42 -12
app.py CHANGED
@@ -1,13 +1,38 @@
  import gradio as gr
  from huggingface_hub import InferenceClient
+ import re

  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

+ def clean_code_blocks(raw_response):
+     """
+     Extract code for each file from a structured LLM response
+     Expected format:
+     index.html
+     <code>
+
+     static/style.css
+     <code>
+     """
+     parts = re.split(r"(?:\n|^)([^\n\/\\<>:\"|?*]+(?:\.[a-z]+)?(?:\/[^\n]+)?)\n", raw_response)
+     file_blocks = {}
+     for i in range(1, len(parts), 2):
+         filename = parts[i].strip()
+         code = parts[i + 1].strip()
+         if filename and code:
+             file_blocks[filename] = code
+     return file_blocks
+
  def generate_code(prompt, system_message, max_tokens, temperature, top_p, backend):
      full_system_prompt = f"""
  You are a code-generation AI. You MUST generate a full website including an index.html file.
- Use ONLY the {backend} backend structure.
- Respond ONLY with raw code and file/folder structure. Do NOT explain or add commentary.
+ Use ONLY the {backend} backend structure.
+ Output all code for each file separately using this format:
+
+ filename.ext
+ <code without backticks>
+
+ Do NOT add commentary, do NOT use markdown. Output raw code only.
  """.strip() + "\n\n" + system_message

      messages = [

@@ -26,29 +51,34 @@ Respond ONLY with raw code and file/folder structure. Do NOT explain or add comm
          token = chunk.choices[0].delta.content
          if token:
              response += token
-             yield response
+
+     # Parse and display each file in its own tab
+     files = clean_code_blocks(response)
+     tabs = []
+     for filename, code in files.items():
+         tabs.append(gr.TabItem(label=filename, elem_id=filename))
+         tabs.append(gr.Code(value=code, language="html" if filename.endswith(".html") else "python" if filename.endswith(".py") else "javascript" if filename.endswith(".js") else "css", label=filename))
+     return tabs

  with gr.Blocks() as demo:
-     gr.Markdown("## WebGen AI — One Prompt → Full Website Generator")
+     gr.Markdown("## WebGen AI — One Prompt → Multi-File Website Generator")

      with gr.Row():
          prompt = gr.Textbox(label="Enter Prompt", placeholder="Describe the website you want...")
          backend = gr.Dropdown(["Flask", "Static", "Node.js"], value="Static", label="Select Backend")

      system_msg = gr.Textbox(value="You are a helpful assistant.", label="System Message")
-     max_tokens = gr.Slider(1, 2048, value=512, label="Max Tokens")
+     max_tokens = gr.Slider(1, 2048, value=1024, label="Max Tokens")
      temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
      top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")

-     code_output = gr.Code(label="AI Output (Live)", language="html")
-
      run_btn = gr.Button("Generate Code")
+     tabs_output = gr.Group()
+
+     def wrapper(*args):
+         return generate_code(*args)

-     run_btn.click(
-         generate_code,
-         inputs=[prompt, system_msg, max_tokens, temperature, top_p, backend],
-         outputs=code_output
-     )
+     run_btn.click(wrapper, inputs=[prompt, system_msg, max_tokens, temperature, top_p, backend], outputs=tabs_output)

  if __name__ == "__main__":
      demo.launch()
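
For reference, below is a minimal standalone sketch (not part of the commit) of how the new clean_code_blocks helper splits a response written in the filename-then-code layout that the updated system prompt requests. The sample_response string is an invented example used only for illustration.

import re

def clean_code_blocks(raw_response):
    # Same splitting logic as the helper added in this commit: capture
    # filename-looking lines, then pair each filename with the code that follows it.
    parts = re.split(r"(?:\n|^)([^\n\/\\<>:\"|?*]+(?:\.[a-z]+)?(?:\/[^\n]+)?)\n", raw_response)
    file_blocks = {}
    for i in range(1, len(parts), 2):
        filename = parts[i].strip()
        code = parts[i + 1].strip()
        if filename and code:
            file_blocks[filename] = code
    return file_blocks

# Invented sample in the format the system prompt asks the model to produce
# (no backticks, no commentary, one filename line before each file's code).
sample_response = "index.html\n<h1>Hello</h1>\n\nstatic/style.css\nh1 { color: red; }\n"

print(clean_code_blocks(sample_response))
# {'index.html': '<h1>Hello</h1>', 'static/style.css': 'h1 { color: red; }'}

Because the regex has one capturing group, re.split returns the text before the first filename at index 0 and then alternates filename/code, which is why the loop steps through the odd indices.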