# nevergiveup / app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
model_id = "Qwen/Qwen2.5-Coder-7B-Instruct"
# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    device_map="auto",
    torch_dtype=torch.float32,  # CPU-compatible dtype
)
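# Note: device_map="auto" relies on the accelerate package; without a GPU the
# model simply stays on the CPU, which is why float32 is used above.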
# Prompt formatter and generator
def generate_html_code(prompt):
    instruction = f"Write only valid HTML code for this: {prompt}\n"
    inputs = tokenizer(instruction, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,
        temperature=0.7,
        do_sample=True,
        top_p=0.95,
    )
    # Decode only the newly generated tokens, not the echoed prompt
    generated = outputs[0][inputs["input_ids"].shape[1]:]
    html_code = tokenizer.decode(generated, skip_special_tokens=True)
    # Optional: trim any leading chatter so the output starts at the first tag
    start = html_code.find("<")
    if start != -1:
        html_code = html_code[start:]
    return html_code, html_code  # code + live preview
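
# Note: Qwen2.5-Coder-Instruct is a chat-tuned model, so prompting through the
# tokenizer's chat template usually gives cleaner completions than a raw string.
# A minimal sketch of that alternative (would replace the tokenizer call above;
# assumes a transformers version where apply_chat_template accepts return_dict):
#
#   messages = [{"role": "user", "content": instruction}]
#   inputs = tokenizer.apply_chat_template(
#       messages, add_generation_prompt=True, return_dict=True, return_tensors="pt"
#   ).to(model.device)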
# Gradio UI
demo = gr.Interface(
    fn=generate_html_code,
    inputs=gr.Textbox(
        label="Describe your HTML component",
        placeholder="e.g. A red button that says Click Me",
        lines=3,
    ),
    outputs=[
        gr.Code(label="Generated HTML Code", language="html"),
        gr.HTML(label="Live Preview"),
    ],
    title="HTML Generator with Qwen2.5",
    description="Generate and preview HTML in real time using the Qwen2.5-Coder-7B-Instruct model (CPU mode, may be slow!)",
)
demo.launch()
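# On Hugging Face Spaces, launch() picks up the host and port automatically;
# when running locally, demo.launch(share=True) is one way to get a public link.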