Spaces:
Running
Running
File size: 11,002 Bytes
dfc5e93 4917053 4e60047 cd13883 4917053 0e4a7a5 4917053 cd13883 0e4a7a5 4917053 0e4a7a5 4917053 0e4a7a5 cd13883 4917053 4e60047 5f4f3c1 4917053 4e60047 4917053 0e4a7a5 4e60047 0e4a7a5 4e60047 0e4a7a5 853d569 4e60047 4917053 4e60047 4917053 9376840 4917053 b752712 4917053 4e60047 b752712 4917053 4e60047 cd13883 0e4a7a5 4917053 58a5e73 cd13883 4917053 4e60047 4917053 4e60047 4917053 cd13883 0e4a7a5 4917053 4e60047 4917053 4e60047 4917053 4e60047 4917053 cd13883 0950920 58a5e73 4917053 4e60047 4917053 0e4a7a5 4917053 4e60047 4917053 4e60047 4917053 4e60047 4917053 58a5e73 4917053 4e60047 4917053 4e60047 4917053 4e60047 4917053 0e4a7a5 4e60047 0e4a7a5 4e60047 0e4a7a5 4e60047 0e4a7a5 4e60047 4917053 83207ef 4e60047 dfc5e93 0e4a7a5 4e60047 0e4a7a5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 |
import gradio as gr
from huggingface_hub import InferenceClient
import os
import re # Import regex for potential cleaning, although prompt is the primary fix
# --- Configuration ---
API_TOKEN = os.getenv("HF_TOKEN", None)
MODEL = "HuggingFaceH4/zephyr-7b-beta" # Or choose another suitable model
# --- Initialize Inference Client ---
# Built once at import time and reused by every generation request.
try:
    print(f"Attempting to initialize Inference Client for model: {MODEL}")
    # A truthy token selects the authenticated client; anything falsy
    # (missing OR empty env var) falls back to anonymous access.
    if API_TOKEN:
        print("Using HF Token found in environment.")
        client = InferenceClient(model=MODEL, token=API_TOKEN)
    else:
        print("HF Token not found. Running without token (may lead to rate limits).")
        client = InferenceClient(model=MODEL)
    print("Inference Client initialized successfully.")
except Exception as init_err:
    # Surface a user-facing error; this aborts module import so the app
    # never starts with a broken client.
    print(f"Error initializing Inference Client: {init_err}")
    raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Check model name, network, and HF_TOKEN secret if applicable. Error: {init_err}")
# --- Core Code Generation Function ---
def _file_structure_instruction(file_structure: str) -> str:
    """Return the prompt rules that match the chosen output file structure."""
    if file_structure == "Single File":
        return (
            "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
            "Embed ALL CSS directly within `<style>` tags inside the `<head>`. "
            "Embed ALL necessary JavaScript directly within `<script>` tags just before the closing `</body>` tag. "
            "Do NOT use markers like `<!-- index.html -->` or `/* style.css */`."
        )
    # Multiple Files
    return (
        "- **File Structure is 'Multiple Files':** Generate code for `index.html`, `style.css`, and `script.js` (if JS is needed). "
        "Use these EXACT markers to separate the files:\n"
        " `<!-- index.html -->`\n"
        " `/* style.css */`\n"
        " `// script.js` (ONLY include this marker and the JS code if JavaScript is necessary for the requested functionality).\n"
        "- Place the corresponding code directly after each marker.\n"
        "- Inside the `index.html` code block, ensure you correctly link the CSS (`<link rel='stylesheet' href='style.css'>`) in the `<head>`.\n"
        "- Inside the `index.html` code block, ensure you correctly include the JS (`<script src='script.js'></script>`) just before the closing `</body>` tag *if* the `// script.js` marker and code are present."
    )


def _build_system_message(backend_choice: str, file_structure: str) -> str:
    """Assemble the strict system prompt sent to the model.

    BUG FIX: rule 5 was previously a plain (non-f) string, so the model
    received the literal text ``{backend_choice}`` instead of the user's
    selection.  It is now an f-string; the ``{{ variable }}`` example is
    escaped (``{{{{ ... }}}}``) so that fragment still renders exactly as
    before.
    """
    file_structure_instruction = _file_structure_instruction(file_structure)
    return (
        "You are an expert frontend web developer AI. Your SOLE task is to generate RAW SOURCE CODE (HTML, CSS, JavaScript) based on the user's request and selected options. "
        "You MUST follow ALL these rules ABSOLUTELY:\n"
        "1. **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested code. NO extra text, NO explanations, NO apologies, NO introductions (like 'Here is the code...', 'Okay, here is the code...'), NO summaries, NO comments about the code (unless it's a standard code comment like `<!-- comment -->`), and ABSOLUTELY NO MARKDOWN FORMATTING like ```html, ```css, ```javascript, or ```.\n"
        "2. **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (e.g., `<!DOCTYPE html>` or `<!-- index.html -->`). NO leading spaces or lines.\n"
        "3. **MANDATORY `index.html`:** Always generate the content for `index.html`.\n"
        f"4. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure below:\n"
        f" {file_structure_instruction}\n"
        f"5. **BACKEND CONTEXT ({backend_choice}):** Use this as a hint for frontend structure (e.g., placeholders like `{{{{ variable }}}}` if 'Flask' is chosen), but ONLY generate the static frontend code (HTML, CSS, client-side JS).\n"
        "6. **FRONTEND ONLY:** Do NOT generate server-side code (Python, Node.js, etc.).\n"
        "7. **ACCURACY:** Generate functional code that directly addresses the user's prompt.\n\n"
        "REMEMBER: ONLY CODE. NO OTHER TEXT. START IMMEDIATELY WITH CODE."  # Final reinforcement
    )


def _clean_generated_code(raw: str) -> str:
    """Strip markdown fences and chatty intros the model may emit.

    Safety net for when the system prompt's "raw code only" rules fail.
    """
    cleaned = raw.strip()
    # Leading fence: ``` optionally followed by a language tag and a newline.
    cleaned = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned)
    # Trailing fence.
    cleaned = re.sub(r"\n?\s*```\s*$", "", cleaned)
    # Common introductory phrases, if they slip through the prompt rules.
    common_intros = [
        "Here is the code:", "Okay, here is the code:", "Here's the code:",
        "```html", "```css", "```javascript",  # Also catch these if regex missed them
    ]
    for intro in common_intros:
        if cleaned.lower().startswith(intro.lower()):
            cleaned = cleaned[len(intro):].lstrip()
    return cleaned.strip()


def generate_code(
    prompt: str,
    backend_choice: str,
    file_structure: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """
    Generates website code based on user prompt and choices.

    Yields the cumulative response token by token for live updates; after the
    stream ends, yields one final cleaned version (fences/intros stripped).
    Strives to output ONLY raw code.
    """
    print(f"--- Generating Code ---")
    print(f"Prompt: {prompt[:100]}...")
    print(f"Backend Context: {backend_choice}")
    print(f"File Structure: {file_structure}")  # Crucial input
    print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")

    system_message = _build_system_message(backend_choice, file_structure)

    # --- Construct the messages for the API ---
    messages = [
        {"role": "system", "content": system_message},
        # Make user prompt clearer
        {"role": "user", "content": f"Generate the website frontend code based on this description: {prompt}"},
    ]

    # --- Stream the response from the API ---
    response_stream = ""
    try:
        print("Sending request to Hugging Face Inference API...")
        for message in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            token = message.choices[0].delta.content
            # The final delta chunk may carry content=None; skip it.
            if isinstance(token, str):
                response_stream += token
                yield response_stream  # Yield the cumulative response for live update
        print(f"API stream finished. Raw length: {len(response_stream)}")
        # Final yield replaces the last streamed value if any cleaning occurred.
        yield _clean_generated_code(response_stream)
    except Exception as e:
        error_message = f"An error occurred during the API call: {e}"
        print(error_message)
        yield f"## Error\n\nFailed to generate code.\n**Reason:** {e}\n\nPlease check the model status, your connection, and API token (if applicable)."
# --- Gradio UI (Blocks layout) ---
with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
    gr.Markdown("# Website Code Generator π")
    gr.Markdown(
        "Describe the website, choose options, and get ONLY the raw frontend code. "
        "Code appears live below. **Select 'Single File' or 'Multiple Files' carefully.**"
    )

    with gr.Row():
        # Left column: all user inputs.
        with gr.Column(scale=2):
            description_box = gr.Textbox(
                lines=5,
                label="Website Description",
                placeholder="e.g., A simple landing page with a navbar, hero section, and footer.",
            )
            backend_hint = gr.Radio(
                choices=["Static", "Flask", "Node.js"],
                value="Static",
                label="Backend Context Hint",
                info="Hint for AI (e.g., template placeholders) - generates ONLY frontend code.",
            )
            structure_choice = gr.Radio(
                choices=["Multiple Files", "Single File"],  # Default: Multiple
                value="Multiple Files",
                label="Output File Structure",
                info="Choose 'Single File' for everything in index.html OR 'Multiple Files' for separate css/js.",
            )
            run_button = gr.Button("Generate Website Code", variant="primary")

        # Right column: live streaming code view.
        with gr.Column(scale=3):
            generated_code_box = gr.Code(
                language="html",
                lines=28,
                interactive=False,
                label="Generated Code (Raw Output)",
            )

    with gr.Accordion("Advanced Generation Settings", open=False):
        tokens_slider = gr.Slider(minimum=512, maximum=4096, value=2048, step=128, label="Max New Tokens")
        temp_slider = gr.Slider(minimum=0.1, maximum=1.2, value=0.6, step=0.1, label="Temperature")
        nucleus_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P")

    # Wire the button to the (streaming) generator function.
    run_button.click(
        fn=generate_code,
        inputs=[
            description_box,
            backend_hint,
            structure_choice,
            tokens_slider,
            temp_slider,
            nucleus_slider,
        ],
        outputs=generated_code_box,
    )

    # Canned example prompts the user can click to pre-fill the inputs.
    gr.Examples(
        examples=[
            ["A simple counter page with a number display, an increment button, and a decrement button. Use Javascript for the logic.", "Static", "Single File"],
            ["A login form with fields for username and password, and a submit button. Basic styling.", "Static", "Multiple Files"],
            ["Product cards display grid. Each card shows an image, product name, price, and an 'Add to Cart' button. Make it responsive.", "Static", "Multiple Files"],
            ["A personal blog homepage with a header, a list of recent posts (just placeholders), and a sidebar with categories.", "Flask", "Multiple Files"],
            ["A very basic HTML page with just a title 'My App' and a heading 'Welcome'. No CSS or JS.", "Static", "Single File"],
        ],
        inputs=[description_box, backend_hint, structure_choice],
        label="Example Prompts",
    )
# --- Launch ---
if __name__ == "__main__":
    print("Starting Gradio app...")
    # launch() blocks for the lifetime of the server; queue() enables
    # streaming/generator outputs and caps concurrent requests at 10.
    demo.queue(max_size=10).launch()
    # Reached only after the server shuts down, so the old message
    # "Gradio app launched." was misleading.
    print("Gradio app stopped.")