import os
import re
import base64
import mimetypes
from http import HTTPStatus
from typing import Dict, List, Optional, Tuple
from urllib.parse import urlparse, urljoin

import PyPDF2
import docx
import cv2
import numpy as np
from PIL import Image
import pytesseract
import requests
from bs4 import BeautifulSoup
import html2text

import gradio as gr
from huggingface_hub import InferenceClient
from tavily import TavilyClient

SystemPrompt = """You are a helpful coding assistant. You help users create applications by generating code based on their requirements.

When asked to create an application, you should:
1. Understand the user's requirements
2. Generate clean, working code
3. Provide HTML output when appropriate for web applications
4. Include necessary comments and documentation
5. Ensure the code is functional and follows best practices

For website redesign tasks:
- Analyze the extracted website content to understand the structure and purpose
- Create a modern, responsive design that improves upon the original
- Maintain the core functionality and content while enhancing the user experience
- Use modern CSS frameworks and design patterns
- Ensure accessibility and mobile responsiveness

If an image is provided, analyze it and use the visual information to better understand the user's requirements.

Always respond with code that can be executed or rendered directly.

Always output only the HTML code inside a ```html ... ``` code block, and do not include any explanations or extra text."""

SystemPromptWithSearch = """You are a helpful coding assistant with access to real-time web search. You help users create applications by generating code based on their requirements.

When asked to create an application, you should:
1. Understand the user's requirements
2. Use web search when needed to find the latest information, best practices, or specific technologies
3. Generate clean, working code
4. Provide HTML output when appropriate for web applications
5. Include necessary comments and documentation
6. Ensure the code is functional and follows best practices

For website redesign tasks:
- Analyze the extracted website content to understand the structure and purpose
- Use web search to find current design trends and best practices for the specific type of website
- Create a modern, responsive design that improves upon the original
- Maintain the core functionality and content while enhancing the user experience
- Use modern CSS frameworks and design patterns
- Ensure accessibility and mobile responsiveness

If an image is provided, analyze it and use the visual information to better understand the user's requirements.

Always respond with code that can be executed or rendered directly.

Always output only the HTML code inside a ```html ... ``` code block, and do not include any explanations or extra text."""

AVAILABLE_MODELS = [
    {
        "name": "DeepSeek V3",
        "id": "deepseek-ai/DeepSeek-V3-0324",
        "description": "DeepSeek V3 model for code generation"
    },
    {
        "name": "DeepSeek R1",
        "id": "deepseek-ai/DeepSeek-R1-0528",
        "description": "DeepSeek R1 model for code generation"
    },
    {
        "name": "ERNIE-4.5-VL",
        "id": "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT",
        "description": "ERNIE-4.5-VL model for multimodal code generation with image support"
    },
    {
        "name": "MiniMax M1",
        "id": "MiniMaxAI/MiniMax-M1-80k",
        "description": "MiniMax M1 model for code generation and general tasks"
    },
    {
        "name": "Qwen3-235B-A22B",
        "id": "Qwen/Qwen3-235B-A22B",
        "description": "Qwen3-235B-A22B model for code generation and general tasks"
    },
    {
        "name": "SmolLM3-3B",
        "id": "HuggingFaceTB/SmolLM3-3B",
        "description": "SmolLM3-3B model for code generation and general tasks"
    }
]

DEMO_LIST = [
    {
        "title": "Todo App",
        "description": "Create a simple todo application with add, delete, and mark as complete functionality"
    },
    {
        "title": "Calculator",
        "description": "Build a basic calculator with addition, subtraction, multiplication, and division"
    },
    {
        "title": "Weather Dashboard",
        "description": "Create a weather dashboard that displays current weather information"
    },
    {
        "title": "Chat Interface",
        "description": "Build a chat interface with message history and user input"
    },
    {
        "title": "E-commerce Product Card",
        "description": "Create a product card component for an e-commerce website"
    },
    {
        "title": "Login Form",
        "description": "Build a responsive login form with validation"
    },
    {
        "title": "Dashboard Layout",
        "description": "Create a dashboard layout with sidebar navigation and main content area"
    },
    {
        "title": "Data Table",
        "description": "Build a data table with sorting and filtering capabilities"
    },
    {
        "title": "Image Gallery",
        "description": "Create an image gallery with lightbox functionality and responsive grid layout"
    },
    {
        "title": "UI from Image",
        "description": "Upload an image of a UI design and I'll generate the HTML/CSS code for it"
    },
    {
        "title": "Extract Text from Image",
        "description": "Upload an image containing text and I'll extract and process the text content"
    },
    {
        "title": "Website Redesign",
        "description": "Enter a website URL to extract its content and redesign it with a modern, responsive layout"
    }
]

# Hugging Face Inference client (provider is resolved automatically)
YOUR_API_TOKEN = os.getenv('HF_TOKEN')
client = InferenceClient(
    provider="auto",
    api_key=YOUR_API_TOKEN,
    bill_to="huggingface"
)

# Tavily web-search client (optional; search features are disabled if the key is missing)
TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
tavily_client = None
if TAVILY_API_KEY:
    try:
        tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
    except Exception as e:
        print(f"Failed to initialize Tavily client: {e}")
        tavily_client = None

# Type aliases for the chat state
History = List[Tuple[str, str]]
Messages = List[Dict[str, str]]

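# Illustrative shapes of these aliases (assumed, for reference only):
#   History:  [("make a todo app", "```html ... ```"), ...]    # (user_message, assistant_reply) pairs
#   Messages: [{"role": "system", "content": "..."},
#              {"role": "user", "content": "..."},
#              {"role": "assistant", "content": "..."}]         # OpenAI-style chat messages
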
def history_to_messages(history: History, system: str) -> Messages:
    """Convert (user, assistant) history tuples into OpenAI-style chat messages."""
    messages = [{'role': 'system', 'content': system}]
    for h in history:
        # Handle multimodal user content (a list of parts) by keeping only its text parts
        user_content = h[0]
        if isinstance(user_content, list):
            text_content = ""
            for item in user_content:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_content += item.get("text", "")
            user_content = text_content if text_content else str(user_content)

        messages.append({'role': 'user', 'content': user_content})
        messages.append({'role': 'assistant', 'content': h[1]})
    return messages

def messages_to_history(messages: Messages) -> History:
    """Convert chat messages (starting with a system message) back into history pairs."""
    assert messages[0]['role'] == 'system'
    history = []
    for q, r in zip(messages[1::2], messages[2::2]):
        # Flatten multimodal user content down to its text parts
        user_content = q['content']
        if isinstance(user_content, list):
            text_content = ""
            for item in user_content:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_content += item.get("text", "")
            user_content = text_content if text_content else str(user_content)

        history.append([user_content, r['content']])
    return history

def history_to_chatbot_messages(history: History) -> List[Dict[str, str]]:
    """Convert history tuples to the chatbot message format."""
    messages = []
    for user_msg, assistant_msg in history:
        # Flatten multimodal user content down to its text parts
        if isinstance(user_msg, list):
            text_content = ""
            for item in user_msg:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_content += item.get("text", "")
            user_msg = text_content if text_content else str(user_msg)

        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    return messages

def remove_code_block(text):
    """Extract the code inside a fenced code block, or return the text unchanged."""
    patterns = [
        r'```(?:html|HTML)\n([\s\S]+?)\n```',
        r'```\n([\s\S]+?)\n```',
        r'```([\s\S]+?)```'
    ]
    for pattern in patterns:
        match = re.search(pattern, text, re.DOTALL)
        if match:
            return match.group(1).strip()

    # No code fence found; return the text (possibly raw HTML) as-is
    return text.strip()

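# Illustrative behaviour (not executed):
#   remove_code_block("```html\n<h1>Hi</h1>\n```")  ->  "<h1>Hi</h1>"
#   remove_code_block("<h1>Hi</h1>")                ->  "<h1>Hi</h1>"   (no fence; returned as-is)
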
def history_render(history: History):
    """Show the history panel and pass the history through."""
    return gr.update(visible=True), history


def clear_history():
    """Reset chat history, chatbot display, file input, and website URL."""
    return [], [], None, ""


def update_image_input_visibility(model):
    """Show the image input only for the multimodal ERNIE-4.5-VL model."""
    is_ernie_vl = model.get("id") == "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT"
    return gr.update(visible=is_ernie_vl)

def process_image_for_model(image):
    """Convert an image (numpy array or PIL Image) to a base64 data URI for model input."""
    if image is None:
        return None

    import io  # local import; only needed when an image is actually provided

    # Gradio may hand the image over as a numpy array
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    buffer = io.BytesIO()
    image.save(buffer, format='PNG')
    img_str = base64.b64encode(buffer.getvalue()).decode()
    return f"data:image/png;base64,{img_str}"

def create_multimodal_message(text, image=None):
    """Create a multimodal message with text and an optional image."""
    if image is None:
        return {"role": "user", "content": text}

    content = [
        {
            "type": "text",
            "text": text
        },
        {
            "type": "image_url",
            "image_url": {
                "url": process_image_for_model(image)
            }
        }
    ]
    return {"role": "user", "content": content}

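# Shape of the message produced when an image is supplied (illustrative, truncated):
#   {"role": "user",
#    "content": [{"type": "text", "text": "..."},
#                {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}]}
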
def perform_web_search(query: str, max_results: int = 5, include_domains=None, exclude_domains=None) -> str:
    """Perform a web search using Tavily and return the results as plain text."""
    if not tavily_client:
        return "Web search is not available. Please set the TAVILY_API_KEY environment variable."

    try:
        search_params = {
            "search_depth": "advanced",
            "max_results": min(max(1, max_results), 20)
        }
        if include_domains is not None:
            search_params["include_domains"] = include_domains
        if exclude_domains is not None:
            search_params["exclude_domains"] = exclude_domains

        response = tavily_client.search(query, **search_params)

        search_results = []
        for result in response.get('results', []):
            title = result.get('title', 'No title')
            url = result.get('url', 'No URL')
            content = result.get('content', 'No content')
            search_results.append(f"Title: {title}\nURL: {url}\nContent: {content}\n")

        if search_results:
            return "Web Search Results:\n\n" + "\n---\n".join(search_results)
        else:
            return "No search results found."

    except Exception as e:
        return f"Search error: {str(e)}"

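# The return value is always a human-readable string, either
#   "Web Search Results:\n\nTitle: ...\nURL: ...\nContent: ...\n---\n..."
# or an error / "no results" message, so callers can append it to a prompt unchanged.
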
def enhance_query_with_search(query: str, enable_search: bool) -> str:
    """Enhance the query with web search results if search is enabled."""
    if not enable_search or not tavily_client:
        return query

    search_results = perform_web_search(query)

    enhanced_query = f"""Original Query: {query}

{search_results}

Please use the search results above to help create the requested application with the most up-to-date information and best practices."""

    return enhanced_query

def send_to_sandbox(code):
    """Wrap generated HTML in a minimal document and embed it in a sandboxed iframe."""
    wrapped_code = f"""
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <script>
        // Safe localStorage polyfill
        const safeStorage = {{
            _data: {{}},
            getItem: function(key) {{ return this._data[key] || null; }},
            setItem: function(key, value) {{ this._data[key] = value; }},
            removeItem: function(key) {{ delete this._data[key]; }},
            clear: function() {{ this._data = {{}}; }}
        }};
        Object.defineProperty(window, 'localStorage', {{
            value: safeStorage,
            writable: false
        }});
        window.onerror = function(message, source, lineno, colno, error) {{
            console.error('Error:', message);
        }};
    </script>
</head>
<body>
    {code}
</body>
</html>
"""
    encoded_html = base64.b64encode(wrapped_code.encode('utf-8')).decode('utf-8')
    data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
    iframe = f'<iframe src="{data_uri}" width="100%" height="920px" sandbox="allow-scripts allow-same-origin allow-forms allow-popups allow-modals allow-presentation" allow="display-capture"></iframe>'
    return iframe

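# The "Preview" tab renders the return value of send_to_sandbox(): an <iframe> whose src is
# a base64-encoded data: URI, so the generated page runs in a sandbox isolated from the
# Gradio app while scripts, forms, and modals still work inside it.
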
def demo_card_click(e: gr.EventData):
    """Return the description of the clicked demo card, falling back to the first demo."""
    try:
        # The index of the clicked card is buried in the event data; check the usual places
        if hasattr(e, '_data') and e._data:
            if 'index' in e._data:
                index = e._data['index']
            elif 'component' in e._data and 'index' in e._data['component']:
                index = e._data['component']['index']
            elif 'target' in e._data and 'index' in e._data['target']:
                index = e._data['target']['index']
            else:
                index = 0
        else:
            index = 0

        # Clamp out-of-range indices
        if index >= len(DEMO_LIST):
            index = 0

        return DEMO_LIST[index]['description']
    except (KeyError, IndexError, AttributeError):
        return DEMO_LIST[0]['description']

def extract_text_from_image(image_path):
    """Extract text from an image using OCR."""
    try:
        # Bail out early if the Tesseract binary is not available
        try:
            pytesseract.get_tesseract_version()
        except Exception:
            return "Error: Tesseract OCR is not installed. Please install Tesseract to extract text from images. See install_tesseract.md for instructions."

        image = cv2.imread(image_path)
        if image is None:
            return "Error: Could not read image file"

        # Preprocess: convert to RGB, then grayscale, then binarize with Otsu's threshold
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # --psm 6: assume a single uniform block of text
        text = pytesseract.image_to_string(binary, config='--psm 6')

        return text.strip() if text.strip() else "No text found in image"

    except Exception as e:
        return f"Error extracting text from image: {e}"

def extract_text_from_file(file_path):
    """Extract plain text from a reference file (PDF, text, CSV, DOCX, or image via OCR)."""
    if not file_path:
        return ""
    mime, _ = mimetypes.guess_type(file_path)
    ext = os.path.splitext(file_path)[1].lower()
    try:
        if ext == ".pdf":
            with open(file_path, "rb") as f:
                reader = PyPDF2.PdfReader(f)
                return "\n".join(page.extract_text() or "" for page in reader.pages)
        elif ext in [".txt", ".md"]:
            with open(file_path, "r", encoding="utf-8") as f:
                return f.read()
        elif ext == ".csv":
            with open(file_path, "r", encoding="utf-8") as f:
                return f.read()
        elif ext == ".docx":
            doc = docx.Document(file_path)
            return "\n".join([para.text for para in doc.paragraphs])
        elif ext in [".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".gif", ".webp"]:
            return extract_text_from_image(file_path)
        else:
            return ""
    except Exception as e:
        return f"Error extracting text: {e}"

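# Extension-to-extractor dispatch used above (summary):
#   .pdf                 -> PyPDF2 page-by-page text extraction
#   .txt / .md / .csv    -> read as UTF-8 text
#   .docx                -> python-docx paragraph text
#   image formats        -> extract_text_from_image (OCR)
#   anything else        -> "" (silently ignored)
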
def extract_website_content(url: str) -> str:
    """Extract content from a website URL."""
    try:
        # Validate and normalize the URL
        parsed_url = urlparse(url)
        if not parsed_url.scheme:
            url = "https://" + url
            parsed_url = urlparse(url)

        if not parsed_url.netloc:
            return "Error: Invalid URL provided"

        # Browser-like headers to reduce the chance of being blocked
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Cache-Control': 'max-age=0'
        }

        session = requests.Session()
        session.headers.update(headers)

        # Retry a few times, switching the User-Agent on a 403 response
        max_retries = 3
        for attempt in range(max_retries):
            try:
                response = session.get(url, timeout=15, allow_redirects=True)
                response.raise_for_status()
                break
            except requests.exceptions.HTTPError as e:
                if e.response.status_code == 403 and attempt < max_retries - 1:
                    session.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
                    continue
                else:
                    raise

        soup = BeautifulSoup(response.content, 'html.parser')

        # Drop script and style tags before extracting text
        for script in soup(["script", "style"]):
            script.decompose()

        # Title and meta description
        title = soup.find('title')
        title_text = title.get_text().strip() if title else "No title found"

        meta_desc = soup.find('meta', attrs={'name': 'description'})
        description = meta_desc.get('content', '') if meta_desc else ""
        # Main content sections
        content_sections = []
        main_selectors = [
            'main', 'article', '.content', '.main-content', '.post-content',
            '#content', '#main', '.entry-content', '.post-body'
        ]
        for selector in main_selectors:
            elements = soup.select(selector)
            for element in elements:
                text = element.get_text().strip()
                if len(text) > 100:
                    content_sections.append(text)

        # Fall back to the whole body (minus page chrome) if no main content was found
        if not content_sections:
            body = soup.find('body')
            if body:
                for element in body.find_all(['nav', 'footer', 'header', 'aside']):
                    element.decompose()
                content_sections.append(body.get_text().strip())

        # Navigation links
        nav_links = []
        nav_elements = soup.find_all(['nav', 'header'])
        for nav in nav_elements:
            links = nav.find_all('a')
            for link in links:
                link_text = link.get_text().strip()
                link_href = link.get('href', '')
                if link_text and link_href:
                    nav_links.append(f"{link_text}: {link_href}")

        # Images, with relative URLs resolved against the page URL
        images = []
        img_elements = soup.find_all('img')
        for img in img_elements:
            src = img.get('src', '')
            alt = img.get('alt', '')
            if src:
                if not src.startswith(('http://', 'https://')):
                    src = urljoin(url, src)
                images.append(f"Image: {alt} ({src})")

        # Assemble a plain-text report for the model
        website_content = f"""
WEBSITE CONTENT EXTRACTION
==========================

URL: {url}
Title: {title_text}
Description: {description}

NAVIGATION MENU:
{chr(10).join(nav_links[:10]) if nav_links else "No navigation found"}

MAIN CONTENT:
{chr(10).join(content_sections[:3]) if content_sections else "No main content found"}

IMAGES:
{chr(10).join(images[:10]) if images else "No images found"}

PAGE STRUCTURE:
- This appears to be a {title_text.lower()} website
- Contains {len(content_sections)} main content sections
- Has {len(nav_links)} navigation links
- Includes {len(images)} images
"""

        return website_content.strip()

    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 403:
            return "Error: Website blocked access (403 Forbidden). This website may have anti-bot protection. Try a different website or provide a description of what you want to build instead."
        elif e.response.status_code == 404:
            return "Error: Website not found (404). Please check the URL and try again."
        elif e.response.status_code >= 500:
            return f"Error: Website server error ({e.response.status_code}). Please try again later."
        else:
            return f"Error accessing website: HTTP {e.response.status_code} - {str(e)}"
    except requests.exceptions.Timeout:
        return "Error: Request timed out. The website may be slow or unavailable."
    except requests.exceptions.ConnectionError:
        return "Error: Could not connect to the website. Please check your internet connection and the URL."
    except requests.exceptions.RequestException as e:
        return f"Error accessing website: {str(e)}"
    except Exception as e:
        return f"Error extracting website content: {str(e)}"

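# Note: the report above is plain text; generation_code() truncates it to 8,000 characters
# before appending it to the user prompt for redesign requests.
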
def generation_code(query: Optional[str], image: Optional[gr.Image], file: Optional[str], website_url: Optional[str], _setting: Dict[str, str], _history: Optional[History], _current_model: Dict, enable_search: bool = False):
    """Stream generated code for the given query plus optional image, file, and website context."""
    if query is None:
        query = ''
    if _history is None:
        _history = []

    # Use the search-aware system prompt when web search is enabled
    system_prompt = SystemPromptWithSearch if enable_search else _setting['system']
    messages = history_to_messages(_history, system_prompt)

    # Append extracted reference-file text (truncated) to the query
    file_text = ""
    if file:
        file_text = extract_text_from_file(file)
        if file_text:
            file_text = file_text[:5000]
            query = f"{query}\n\n[Reference file content below]\n{file_text}"

    # Append extracted website content (truncated) for redesign requests
    website_text = ""
    if website_url and website_url.strip():
        website_text = extract_website_content(website_url.strip())
        if website_text and not website_text.startswith("Error"):
            website_text = website_text[:8000]
            query = f"{query}\n\n[Website content to redesign below]\n{website_text}"
        elif website_text.startswith("Error"):
            fallback_guidance = """
Since I couldn't extract the website content, please provide additional details about what you'd like to build:

1. What type of website is this? (e.g., e-commerce, blog, portfolio, dashboard)
2. What are the main features you want?
3. What's the target audience?
4. Any specific design preferences? (colors, style, layout)

This will help me create a better design for you."""
            query = f"{query}\n\n[Error extracting website: {website_text}]{fallback_guidance}"

    # Optionally enrich the query with web search results
    enhanced_query = enhance_query_with_search(query, enable_search)

    if image is not None:
        messages.append(create_multimodal_message(enhanced_query, image))
    else:
        messages.append({'role': 'user', 'content': enhanced_query})

    try:
        completion = client.chat.completions.create(
            model=_current_model["id"],
            messages=messages,
            stream=True,
            max_tokens=5000
        )
        content = ""
        for chunk in completion:
            if chunk.choices[0].delta.content:
                content += chunk.choices[0].delta.content
                clean_code = remove_code_block(content)
                yield {
                    code_output: clean_code,
                    history_output: history_to_chatbot_messages(_history),
                }

        # Final update: record the exchange in history and render the preview
        _history = messages_to_history(messages + [{
            'role': 'assistant',
            'content': content
        }])
        yield {
            code_output: remove_code_block(content),
            history: _history,
            sandbox: send_to_sandbox(remove_code_block(content)),
            history_output: history_to_chatbot_messages(_history),
        }
    except Exception as e:
        error_message = f"Error: {str(e)}"
        yield {
            code_output: error_message,
            history_output: history_to_chatbot_messages(_history),
        }

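# generation_code is a generator wired to btn.click below. Each yield is a dict keyed by
# Gradio component instances (code_output, history, sandbox, history_output), which lets a
# single handler stream partial code updates and refresh only the components it names.
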
with gr.Blocks(
    theme=gr.themes.Base(
        primary_hue="blue",
        secondary_hue="gray",
        neutral_hue="gray",
        font=gr.themes.GoogleFont("Inter"),
        font_mono=gr.themes.GoogleFont("JetBrains Mono"),
        text_size=gr.themes.sizes.text_md,
        spacing_size=gr.themes.sizes.spacing_md,
        radius_size=gr.themes.sizes.radius_md
    ),
    title="AnyCoder - AI Code Generator"
) as demo:
    history = gr.State([])
    setting = gr.State({
        "system": SystemPrompt,
    })
    current_model = gr.State(AVAILABLE_MODELS[0])
    open_panel = gr.State(None)

    with gr.Sidebar():
        gr.Markdown("# AnyCoder")
        gr.Markdown("*AI-Powered Code Generator*")

        input = gr.Textbox(
            label="What would you like to build?",
            placeholder="Describe your application...",
            lines=3
        )

        website_url_input = gr.Textbox(
            label="Website URL (for redesign)",
            placeholder="https://example.com",
            lines=1,
            visible=True
        )

        file_input = gr.File(
            label="Reference file",
            file_types=[".pdf", ".txt", ".md", ".csv", ".docx", ".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".gif", ".webp"],
            visible=True
        )

        # Only shown when the selected model supports image input (see on_model_change)
        image_input = gr.Image(
            label="UI design image",
            visible=False
        )

        with gr.Row():
            btn = gr.Button("Generate", variant="primary", size="lg", scale=2)
            clear_btn = gr.Button("Clear", variant="secondary", size="sm", scale=1)

        search_toggle = gr.Checkbox(
            label="🔍 Web search",
            value=False
        )

        model_dropdown = gr.Dropdown(
            choices=[model['name'] for model in AVAILABLE_MODELS],
            value=AVAILABLE_MODELS[0]['name'],
            label="Model"
        )

        # Quick-start demo buttons that pre-fill the prompt box
        gr.Markdown("**Quick start**")
        with gr.Column():
            for i, demo_item in enumerate(DEMO_LIST[:3]):
                demo_card = gr.Button(
                    value=demo_item['title'],
                    variant="secondary",
                    size="sm"
                )
                demo_card.click(
                    fn=lambda idx=i: gr.update(value=DEMO_LIST[idx]['description']),
                    outputs=input
                )

        if not tavily_client:
            gr.Markdown("⚠️ Web search unavailable")
        else:
            gr.Markdown("✅ Web search available")

        model_display = gr.Markdown(f"**Model:** {AVAILABLE_MODELS[0]['name']}", visible=False)

        def on_model_change(model_name):
            """Sync the selected model state, its label, and image-input visibility."""
            for m in AVAILABLE_MODELS:
                if m['name'] == model_name:
                    return m, f"**Model:** {m['name']}", update_image_input_visibility(m)
            return AVAILABLE_MODELS[0], f"**Model:** {AVAILABLE_MODELS[0]['name']}", update_image_input_visibility(AVAILABLE_MODELS[0])

        def save_prompt(input):
            return {setting: {"system": input}}

        model_dropdown.change(
            on_model_change,
            inputs=model_dropdown,
            outputs=[current_model, model_display, image_input]
        )

        with gr.Accordion("Advanced", open=False):
            systemPromptInput = gr.Textbox(
                value=SystemPrompt,
                label="System prompt",
                lines=5
            )
            save_prompt_btn = gr.Button("Save", variant="primary", size="sm")
            save_prompt_btn.click(save_prompt, inputs=systemPromptInput, outputs=setting)

    with gr.Column():
        with gr.Tabs():
            with gr.Tab("Code"):
                code_output = gr.Code(
                    language="html",
                    lines=25,
                    interactive=False,
                    label="Generated code"
                )
            with gr.Tab("Preview"):
                sandbox = gr.HTML(label="Live preview")
            with gr.Tab("History"):
                history_output = gr.Chatbot(show_label=False, height=400, type="messages")

    # Wire up the main generate/clear actions
    btn.click(
        generation_code,
        inputs=[input, image_input, file_input, website_url_input, setting, history, current_model, search_toggle],
        outputs=[code_output, history, sandbox, history_output]
    )
    clear_btn.click(clear_history, outputs=[history, history_output, file_input, website_url_input])

if __name__ == "__main__":
    demo.queue(default_concurrency_limit=20).launch(ssr_mode=True, mcp_server=True)