File size: 6,321 Bytes
aa6f369
531e420
aa6f369
8531a26
aa6f369
 
f5044d7
aa6f369
 
4d76afc
531e420
aa6f369
531e420
86d3caa
4d76afc
531e420
 
 
 
 
 
 
 
86d3caa
531e420
 
 
 
 
 
 
 
 
338b431
86d3caa
531e420
 
338b431
b647320
8941b06
38f5ac1
531e420
 
 
 
 
 
 
 
 
 
 
 
 
 
4d76afc
 
338b431
531e420
 
 
 
 
 
 
 
 
 
86d3caa
531e420
4d76afc
aa6f369
 
8531a26
aa6f369
4d76afc
531e420
 
 
 
 
 
 
38f5ac1
531e420
 
 
4d76afc
aa6f369
531e420
 
 
 
 
86d3caa
531e420
 
 
86d3caa
531e420
 
e8a0246
531e420
aa6f369
531e420
 
86d3caa
4d76afc
531e420
 
 
4d76afc
aa6f369
 
531e420
 
 
 
 
 
 
86d3caa
aa6f369
531e420
 
 
 
 
 
 
 
 
 
 
e3eee09
531e420
 
 
 
 
 
 
 
 
e3eee09
531e420
4d76afc
531e420
 
 
 
 
 
6541c57
f5a64b7
aa6f369
531e420
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
import gradio as gr
from huggingface_hub import InferenceClient
import os
import re

# Hugging Face API token; the client falls back to anonymous access when unset.
API_TOKEN = os.getenv("HF_TOKEN", None)
MODEL = "Qwen/Qwen3-32B"

try:
    print(f"Initializing Inference Client for model: {MODEL}")
    # Pass the token only when one is configured; anonymous access works for
    # public models but is rate-limited.
    client = InferenceClient(model=MODEL, token=API_TOKEN) if API_TOKEN else InferenceClient(model=MODEL)
except Exception as e:
    # NOTE(review): raising gr.Error at import time aborts app startup; a
    # plain RuntimeError chained `from e` may be more conventional here —
    # confirm intent.
    raise gr.Error(f"Failed to initialize model client for {MODEL}. Error: {e}. Check HF_TOKEN and model availability.")


def extract_files_from_code(raw_code: str) -> list:
    """
    Scan *raw_code* for markdown-style fenced sections whose info string is a
    filename, e.g. ```index.html ... ```.

    Returns a list of dicts, one per fenced section, each carrying
    "filename", "content" (stripped) and "language". The language is guessed
    from the file extension and defaults to "plaintext".
    """
    pattern = r"```([a-zA-Z0-9.+_-]+)\n(.*?)```"
    suffix_to_lang = {
        ".html": "html",
        ".py": "python",
        ".js": "javascript",
        ".css": "css",
    }

    files = []
    for name_part, body in re.findall(pattern, raw_code, flags=re.DOTALL):
        name = name_part.strip()
        language = next(
            (lang for suffix, lang in suffix_to_lang.items() if name.endswith(suffix)),
            "plaintext",
        )
        files.append({
            "filename": name,
            "content": body.strip(),
            "language": language,
        })
    return files


def clean_streamed_response(text: str) -> str:
    """
    Remove <think>...</think> and system/assistant/user tokens.
    """
    # Remove <think>...</think> blocks
    text = re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL)
    # Remove role markers
    text = re.sub(r"<\s*\|?\s*(user|system|assistant)\s*\|?\s*>", "", text, flags=re.IGNORECASE)
    return text


def extract_think_message(text: str) -> str:
    """
    Return the model's ``<think>`` reasoning from *text*, or "" if absent.

    Also accepts an unterminated ``<think>`` block: while streaming, the
    closing tag may not have arrived yet, and the caller displays the
    reasoning live — with a closed-tag-only match, nothing would show until
    the model finished thinking.
    """
    match = re.search(r"<think>(.*?)(?:</think>|$)", text, flags=re.DOTALL)
    return match.group(1).strip() if match else ""


def generate_code(prompt, backend_choice, max_tokens, temperature, top_p):
    """
    Stream website code for *prompt* from the chat model.

    Yields (files, think, think_box_update) tuples as tokens arrive:
      files            -- list of {"filename", "content", "language"} dicts
                          parsed from the response so far
      think            -- the model's current <think> reasoning ("" if none)
      think_box_update -- gr.update toggling the reasoning box's visibility
    """
    print(f"Streaming generation for: {prompt[:80]}...")

    # NOTE: extract_files_from_code only recognizes code inside
    # filename-labeled triple-backtick fences, so the prompt must require
    # them for EVERY file — including a lone index.html. (The previous prompt
    # simultaneously forbade and required fences; an obedient single-file
    # response was unparseable.)
    system_message = (
        "You are an AI that generates website code. You MUST ONLY output the raw code, without any conversational text like 'Here is the code' or explanations before or after the code blocks. "
        "If the user requests 'Static' or the prompt clearly implies only frontend code, generate ONLY the content for the `index.html` file. "
        "If the user requests 'Flask' or 'Node.js' and the prompt requires backend logic, you MUST generate both the `index.html` content AND the corresponding main backend file content (e.g., `app.py` for Flask, `server.js` or `app.js` for Node.js). "
        "Every file you output, including a single `index.html`, MUST be wrapped in its own triple-backtick section labeled with its filename, e.g. ```index.html or ```app.py. Do not use any other markdown fences. "
        "The generated website code must be SFW and have minimal errors. Only include comments where user modification is strictly required."
    )

    user_prompt = f"USER_PROMPT = {prompt}\nUSER_BACKEND = {backend_choice}"

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt},
    ]

    stream = client.chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    )

    full_response = ""
    files = []
    # Clear any previous output before the first token arrives.
    yield [], "", gr.update(visible=False)

    try:
        for message in stream:
            # Some providers emit keep-alive chunks with an empty choices
            # list; indexing [0] on those would raise IndexError.
            if not message.choices:
                continue
            token = message.choices[0].delta.content
            if not token:
                continue
            full_response += token

            # Live <think> display plus cleaned, parseable code so far.
            think = extract_think_message(full_response)
            cleaned = clean_streamed_response(full_response)

            files = extract_files_from_code(cleaned)  # live update

            yield files, think, gr.update(visible=bool(think.strip()))
    except Exception as e:
        # Surface streaming errors in the UI instead of dying silently.
        print(f"Error: {e}")
        yield [], f"Error: {e}", gr.update(visible=True)


with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
    gr.Markdown("# ⚡ Website Code Generator")
    gr.Markdown("Enter a description and get live code split into actual files like `index.html`, `app.py`, etc.")

    # Shared state carrying generate_code's output into display_outputs.
    # BUG FIX: the original instantiated fresh gr.State() components inline
    # in BOTH the .click() outputs and the .then() inputs — every gr.State()
    # call creates a distinct component, so the parsed files and think text
    # never reached the display step. The same two instances must be wired
    # on both sides.
    files_state = gr.State([])
    think_state = gr.State("")

    with gr.Row():
        with gr.Column(scale=2):
            prompt = gr.Textbox(label="Website Prompt", lines=5)
            backend = gr.Radio(["Static", "Flask", "Node.js"], value="Static", label="Backend Type")
            gen_btn = gr.Button("Generate Code", variant="primary")
            with gr.Accordion("Advanced", open=False):
                max_tokens = gr.Slider(512, 4096, step=256, value=2048, label="Max Tokens")
                temperature = gr.Slider(0.1, 1.5, step=0.1, value=0.7, label="Temperature")
                top_p = gr.Slider(0.1, 1.0, step=0.05, value=0.95, label="Top-P")

        with gr.Column(scale=3):
            file_output = gr.Group()
            code_tabs = gr.Tabs()

            dynamic_outputs = []

            for i in range(5):  # Pre-create 5 tabs max; files beyond the fifth are dropped.
                with code_tabs:
                    code_box = gr.Code(label=f"File {i+1}", language="plaintext", visible=False, lines=25)
                    dynamic_outputs.append(code_box)

            think_box = gr.Textbox(label="Thinking...", visible=False, interactive=False)

    def display_outputs(file_list, think_msg):
        """Map the parsed file list onto the five pre-created code tabs."""
        updates = []
        for i in range(5):
            if i < len(file_list):
                f = file_list[i]
                updates.append(gr.update(value=f["content"], label=f["filename"], language=f["language"], visible=True))
            else:
                updates.append(gr.update(visible=False))
        # Visibility is derived from the message itself rather than passed in.
        return updates + [gr.update(value=think_msg, visible=bool(think_msg.strip()))]

    gen_btn.click(
        fn=generate_code,
        inputs=[prompt, backend, max_tokens, temperature, top_p],
        outputs=[files_state, think_state, think_box],
    ).then(
        fn=display_outputs,
        inputs=[files_state, think_state],
        outputs=dynamic_outputs + [think_box]
    )

if __name__ == "__main__":
    # queue() is required for streaming/generator event handlers;
    # launch() starts the local Gradio server.
    demo.queue().launch()