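"""Gradio front end for Auto-Diffusers.

Collects hardware specs through a web UI and asks Google Gemini to generate
hardware-optimized diffusers inference code. AutoDiffusersGenerator and
SimpleMemoryCalculator are provided by sibling modules in this Space.
"""
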
import os
import re
import logging

import gradio as gr
from dotenv import load_dotenv
import google.generativeai as genai

from auto_diffusers import AutoDiffusersGenerator
from simple_memory_calculator import SimpleMemoryCalculator

load_dotenv()

# Module-level logger for the Gradio app
logger = logging.getLogger(__name__)
class GradioAutodiffusers:
    def __init__(self):
        logger.info("Initializing GradioAutodiffusers")

        self.api_key = os.getenv('GOOGLE_API_KEY')
        if not self.api_key:
            logger.error("GOOGLE_API_KEY not found in environment variables")
            raise ValueError("GOOGLE_API_KEY not found in .env file")
        logger.debug(f"API key found, length: {len(self.api_key)}")

        try:
            self.generator = AutoDiffusersGenerator(self.api_key)
            logger.info("AutoDiffusersGenerator initialized successfully")
        except Exception as e:
            logger.error(f"Failed to initialize AutoDiffusersGenerator: {e}")
            raise

        try:
            self.memory_calculator = SimpleMemoryCalculator()
            logger.info("SimpleMemoryCalculator initialized successfully")
        except Exception as e:
            logger.error(f"Failed to initialize SimpleMemoryCalculator: {e}")
            raise

        # Default settings
        self.current_model = 'gemini-2.5-flash-preview-05-20'
        self.temperature = 0.7
        self.max_output_tokens = 8192
        self.top_p = 0.9
        self.top_k = 40

        logger.debug(f"Default model settings: {self.current_model}, temp={self.temperature}")

    def update_model_settings(self, model_name, temperature, max_output_tokens, top_p, top_k):
        """Update Gemini model settings."""
        logger.info(f"Updating model settings: {model_name}")
        logger.debug(f"New settings: temp={temperature}, max_tokens={max_output_tokens}, top_p={top_p}, top_k={top_k}")
        try:
            self.current_model = model_name
            self.temperature = temperature
            self.max_output_tokens = max_output_tokens
            self.top_p = top_p
            self.top_k = top_k

            # Rebuild the generator's model with the new settings
            genai.configure(api_key=self.api_key)
            generation_config = genai.types.GenerationConfig(
                temperature=temperature,
                max_output_tokens=max_output_tokens,
                top_p=top_p,
                top_k=top_k
            )
            self.generator.model = genai.GenerativeModel(model_name, generation_config=generation_config)

            logger.info("Model settings updated successfully")
            return f"✅ Model updated to {model_name} with new settings"
        except Exception as e:
            logger.error(f"Failed to update model settings: {e}")
            return f"❌ Failed to update model: {str(e)}"
    def get_generation_prompt(self, model_name, prompt_text, image_size, num_inference_steps, hardware_specs, optimization_profile):
        """Get the actual prompt that will be sent to the Gemini API."""
        return self.generator._create_generation_prompt(
            model_name, prompt_text, image_size, num_inference_steps,
            hardware_specs, optimization_profile
        )

    def analyze_model_memory(self, model_name, vram_gb):
        """Analyze model memory requirements and provide recommendations."""
        try:
            if not vram_gb:
                vram_gb = 8  # Default
            memory_info = self.memory_calculator.get_model_memory_requirements(model_name)
            recommendations = self.memory_calculator.get_memory_recommendation(model_name, float(vram_gb))
            formatted_info = self.memory_calculator.format_memory_info(model_name)
            return memory_info, recommendations, formatted_info
        except Exception as e:
            error_msg = f"Error analyzing model memory: {str(e)}"
            return {'error': error_msg}, {'error': error_msg}, error_msg
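
    # Hypothetical shape of the return value above (the exact keys are defined
    # by SimpleMemoryCalculator, not here):
    #   analyze_model_memory("black-forest-labs/FLUX.1-schnell", 8)
    #   -> (memory_info dict, recommendations dict, formatted markdown string)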

    def generate_code_with_manual_specs(self,
                                        gpu_name,
                                        vram_gb,
                                        ram_gb,
                                        platform,
                                        model_name,
                                        prompt_text,
                                        dtype_selection,
                                        width,
                                        height,
                                        inference_steps,
                                        memory_analysis=None):
        """Generate optimized code with manual hardware specifications."""
        try:
            # Parse dtype selection
            if dtype_selection == "Auto":
                user_dtype = None
            else:
                user_dtype = f"torch.{dtype_selection}"

            # Create manual hardware specs
            manual_specs = {
                'platform': platform,
                'architecture': 'manual_input',
                'cpu_count': 8,  # Default
                'python_version': '3.11',
                'cuda_available': 'nvidia' in gpu_name.lower() if gpu_name else False,
                'mps_available': platform == 'Darwin' and 'apple' in gpu_name.lower() if gpu_name else False,
                'torch_version': '2.0+',
                'manual_input': True,
                'ram_gb': int(ram_gb) if ram_gb else 16,
                'user_dtype': user_dtype
            }

            # Add GPU info if provided
            if gpu_name and vram_gb:
                manual_specs['gpu_info'] = [{
                    'name': gpu_name,
                    'memory_mb': int(vram_gb) * 1024
                }]
                if 'nvidia' in gpu_name.lower():
                    manual_specs['cuda_available'] = True
                    manual_specs['cuda_device_count'] = 1
                    manual_specs['cuda_device_name'] = gpu_name
                    manual_specs['cuda_memory'] = int(vram_gb)
            else:
                manual_specs['gpu_info'] = None

            # Generate optimized code with manual specs and memory analysis
            optimized_code = self.generator.generate_optimized_code(
                model_name=model_name,
                prompt_text=prompt_text,
                image_size=(int(height), int(width)),
                num_inference_steps=int(inference_steps),
                use_manual_specs=True,
                manual_specs=manual_specs,
                memory_analysis=memory_analysis
            )

            # Strip any markdown code fences from the response
            if optimized_code.startswith('```python'):
                optimized_code = optimized_code[9:]
            if optimized_code.endswith('```'):
                optimized_code = optimized_code[:-3]

            return optimized_code.strip()
        except Exception as e:
            return f"Error generating code: {str(e)}"


def create_gradio_interface():
    """Create and configure the Gradio interface."""
    app = GradioAutodiffusers()

    with gr.Blocks(
        title="Auto-Diffusers Code Generator",
        theme=gr.themes.Soft(
            primary_hue="violet",
            secondary_hue="blue",
            neutral_hue="slate",
            radius_size=gr.themes.sizes.radius_lg,
            font=[gr.themes.GoogleFont("Poppins"), gr.themes.GoogleFont("Inter"), "system-ui", "sans-serif"]
        ).set(
            background_fill_primary="*neutral_25",
            background_fill_secondary="*neutral_50",
            block_background_fill="rgba(255, 255, 255, 0.95)",
            block_border_width="0px",
            block_shadow="0 8px 32px rgba(0, 0, 0, 0.08)",
            panel_background_fill="rgba(255, 255, 255, 0.9)",
            button_primary_background_fill="*primary_500",
            button_primary_background_fill_hover="*primary_600",
            button_secondary_background_fill="rgba(255, 255, 255, 0.8)",
            button_secondary_background_fill_hover="rgba(255, 255, 255, 0.95)"
        ),
css=""" | |
/* Global Styles */ | |
.gradio-container { | |
background: #fef7f5 !important; | |
min-height: 100vh; | |
font-family: 'Georgia', 'Times New Roman', serif !important; | |
} | |
/* Remove main tag margin and center */ | |
main { | |
margin: 0 auto !important; | |
max-width: 100% !important; | |
} | |
.main-container { | |
max-width: 1400px; | |
margin: 0 auto; | |
padding: 0.5rem 0.1rem; | |
/* Removed position: relative that can interfere with dropdown positioning */ | |
} | |
/* Paper Card Effects */ | |
.glass-card { | |
background: #fefcfa !important; | |
border: 1px solid #f4e6e1 !important; | |
border-radius: 12px !important; | |
margin-bottom: 0.5rem !important; | |
padding: 0.5rem !important; | |
border-top: 3px solid #f0c5b8 !important; | |
} | |
.ultra-glass { | |
background: #fefcfa !important; | |
border: 1px solid #f4e6e1 !important; | |
border-radius: 12px !important; | |
margin-bottom: 0.5rem !important; | |
padding: 0.5rem !important; | |
border-left: 4px solid #f0c5b8 !important; | |
} | |
/* Paper Header */ | |
.hero-header { | |
background: #fdf5f3 !important; | |
border: 2px solid #f4e6e1 !important; | |
border-radius: 16px !important; | |
margin-bottom: 1rem !important; | |
position: relative; | |
overflow: hidden; | |
width: 100% !important; | |
max-width: 100% !important; | |
box-sizing: border-box !important; | |
} | |
/* Paper Buttons */ | |
.generate-btn { | |
background: #e67e5a !important; | |
border: 2px solid #d96b47 !important; | |
color: #fefcfa !important; | |
font-weight: 600 !important; | |
font-size: 1.3rem !important; | |
padding: 1.2rem 2.5rem !important; | |
border-radius: 12px !important; | |
transition: all 0.3s ease !important; | |
font-family: 'Georgia', serif !important; | |
letter-spacing: 0.5px !important; | |
} | |
/* View Prompt Button */ | |
.view-prompt-btn { | |
background: #f4e6e1 !important; | |
border: 1px solid #e8a491 !important; | |
color: #5a3a2a !important; | |
font-weight: 500 !important; | |
font-size: 0.9rem !important; | |
padding: 0.5rem 1rem !important; | |
border-radius: 8px !important; | |
transition: all 0.2s ease !important; | |
font-family: 'Georgia', serif !important; | |
margin-bottom: 0.5rem !important; | |
} | |
.view-prompt-btn:hover { | |
background: #f0c5b8 !important; | |
} | |
/* Modal Overlay - simple approach */ | |
.modal-overlay { | |
position: fixed !important; | |
top: 0 !important; | |
left: 0 !important; | |
width: 100vw !important; | |
height: 100vh !important; | |
background: rgba(0, 0, 0, 0.5) !important; | |
z-index: 9999 !important; | |
justify-content: center !important; | |
align-items: center !important; | |
padding: 2rem !important; | |
box-sizing: border-box !important; | |
} | |
/* Show as flex only when visible class is present */ | |
.modal-overlay.visible { | |
display: flex !important; | |
} | |
/* Modal Content */ | |
.modal-content { | |
background: #fefcfa !important; | |
border: 2px solid #f0c5b8 !important; | |
border-radius: 16px !important; | |
max-width: 90vw !important; | |
max-height: 80vh !important; | |
width: 800px !important; | |
padding: 2rem !important; | |
box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3) !important; | |
overflow: hidden !important; | |
flex-direction: column !important; | |
} | |
/* Modal Header */ | |
.modal-header { | |
justify-content: space-between !important; | |
align-items: center !important; | |
margin-bottom: 1rem !important; | |
padding-bottom: 1rem !important; | |
border-bottom: 1px solid #f0c5b8 !important; | |
} | |
/* Modal Close Button */ | |
.modal-close-btn { | |
background: #f4e6e1 !important; | |
border: 1px solid #e8a491 !important; | |
color: #5a3a2a !important; | |
font-weight: 700 !important; | |
font-size: 1.2rem !important; | |
padding: 0.4rem 0.6rem !important; | |
border-radius: 6px !important; | |
min-width: 2rem !important; | |
height: 2rem !important; | |
align-items: center !important; | |
justify-content: center !important; | |
} | |
.modal-close-btn:hover { | |
background: #f0c5b8 !important; | |
} | |
/* Prompt Container */ | |
.prompt-container { | |
background: #fcf3f0 !important; | |
border: 1px solid #f0c5b8 !important; | |
border-radius: 8px !important; | |
max-height: 400px !important; | |
overflow-y: auto !important; | |
flex: 1 !important; | |
} | |
.generate-btn:hover { | |
background: #d96b47 !important; | |
transform: translateY(-1px) !important; | |
} | |
.generate-btn:active { | |
transform: translateY(0px) !important; | |
} | |
/* Paper Section Headers */ | |
.section-header { | |
background: #f9f0ec !important; | |
border: 1px solid #f0c5b8 !important; | |
border-radius: 8px !important; | |
padding: 0.7rem !important; | |
margin-bottom: 0.5rem !important; | |
} | |
/* Paper Inputs */ | |
input[type="text"], | |
input[type="number"], | |
textarea { | |
background: #fefcfa !important; | |
border: 2px solid #f4e6e1 !important; | |
border-radius: 8px !important; | |
padding: 0.8rem 1.2rem !important; | |
font-weight: 500 !important; | |
color: #5a3a2a !important; | |
font-family: 'Georgia', serif !important; | |
font-size: 1rem !important; | |
} | |
input[type="text"]:focus, | |
input[type="number"]:focus, | |
textarea:focus { | |
background: #fefcfa !important; | |
border-color: #e8a491 !important; | |
outline: none !important; | |
} | |
/* CRITICAL: Reset all problematic CSS for dropdowns */ | |
label:has(+ [data-testid="dropdown"]), | |
div:has([data-testid="dropdown"]), | |
[data-testid="dropdown"], | |
[data-testid="dropdown"] *, | |
.gradio-dropdown, | |
.gradio-dropdown * { | |
position: static !important; | |
transform: none !important; | |
backdrop-filter: none !important; | |
filter: none !important; | |
} | |

        /* AGGRESSIVE FIX: Override ALL possible transparency sources */
        * {
            --dropdown-bg: #ffffff !important;
            --dropdown-opacity: 1 !important;
        }

        /* Target every possible dropdown element with maximum specificity */
        .gradio-container [data-testid="dropdown"] div[role="listbox"],
        .gradio-container .gradio-dropdown .dropdown-content,
        .gradio-container .dropdown-menu,
        .gradio-container div[role="listbox"],
        .gradio-container .svelte-1gfkn6j,
        body [data-testid="dropdown"] div[role="listbox"],
        body .dropdown-menu,
        body div[role="listbox"],
        html [data-testid="dropdown"] div[role="listbox"] {
            background: #ffffff !important;
            background-color: #ffffff !important;
            opacity: 1 !important;
            position: absolute !important;
            z-index: 99999 !important;
            border: 2px solid #d1d5db !important;
            border-radius: 8px !important;
            box-shadow: 0 8px 24px rgba(0, 0, 0, 0.25) !important;
            max-height: 200px !important;
            overflow-y: auto !important;
            backdrop-filter: none !important;
            filter: none !important;
            background-image: none !important;
            background-blend-mode: normal !important;
            /* Force solid with CSS variables */
            background: var(--dropdown-bg, #ffffff) !important;
            opacity: var(--dropdown-opacity, 1) !important;
        }

        /* Aggressive option styling */
        .gradio-container [data-testid="dropdown"] div[role="listbox"] > *,
        .gradio-container .dropdown-menu > *,
        .gradio-container div[role="listbox"] > *,
        body [data-testid="dropdown"] div[role="listbox"] > *,
        body .dropdown-menu > *,
        body div[role="listbox"] > * {
            background: #ffffff !important;
            background-color: #ffffff !important;
            padding: 0.75rem 1rem !important;
            color: #1f2937 !important;
            cursor: pointer !important;
            opacity: 1 !important;
            border: none !important;
            margin: 0 !important;
            display: block !important;
            width: 100% !important;
            text-align: left !important;
        }

        /* Ensure dropdown menus appear correctly with SOLID background */
        [data-testid="dropdown"] div[role="listbox"],
        .gradio-dropdown .dropdown-content,
        .dropdown-menu,
        div[role="listbox"],
        .svelte-1gfkn6j,
        .gradio-container div[role="listbox"] {
            position: absolute !important;
            z-index: 9999 !important;
            background: #ffffff !important;
            background-color: #ffffff !important;
            opacity: 1 !important;
            border: 1px solid #d1d5db !important;
            border-radius: 8px !important;
            box-shadow: 0 4px 16px rgba(0, 0, 0, 0.15) !important;
            max-height: 200px !important;
            overflow-y: auto !important;
            backdrop-filter: none !important;
            /* Force solid background */
            background-image: none !important;
            background-blend-mode: normal !important;
        }

        /* Dropdown option styling - SOLID background for each option */
        [data-testid="dropdown"] div[role="listbox"] > *,
        .dropdown-menu > *,
        div[role="listbox"] > *,
        .svelte-1gfkn6j > * {
            background: #ffffff !important;
            background-color: #ffffff !important;
            padding: 0.5rem 0.75rem !important;
            color: #374151 !important;
            cursor: pointer !important;
            transition: background-color 0.2s ease !important;
            opacity: 1 !important;
        }

        /* Dropdown option hover effect */
        [data-testid="dropdown"] div[role="listbox"] > *:hover,
        .dropdown-menu > *:hover,
        div[role="listbox"] > *:hover {
            background: #f3f4f6 !important;
            color: #1f2937 !important;
        }

        /* Dropdown option selected state */
        [data-testid="dropdown"] div[role="listbox"] > *[aria-selected="true"],
        .dropdown-menu > *.selected,
        div[role="listbox"] > *[aria-selected="true"] {
            background: #e0e7ff !important;
            color: #3730a3 !important;
        }

        /* Paper Code Areas */
        .code-container {
            background: #f9f0ec !important;
            border: 1px solid #f0c5b8 !important;
            border-radius: 8px !important;
            overflow: hidden !important;
        }

        /* Force text selection on ALL code elements */
        .code-container,
        .code-container *,
        .code-container textarea,
        .code-container input,
        .code-container .cm-editor,
        .code-container .cm-content,
        .code-container .cm-line {
            user-select: text !important;
            -webkit-user-select: text !important;
            -moz-user-select: text !important;
            -ms-user-select: text !important;
            cursor: text !important;
        }

        /* Make selection visible */
        .code-container .cm-editor ::selection {
            background: #3b82f6 !important;
            color: white !important;
        }
        .code-container .cm-editor ::-moz-selection {
            background: #3b82f6 !important;
            color: white !important;
        }

        /* Make cursor visible */
        .code-container .cm-cursor {
            border-color: #3b82f6 !important;
            border-width: 2px !important;
            opacity: 1 !important;
            visibility: visible !important;
        }

        /* Ensure selection works in CodeMirror */
        .code-container .cm-selectionBackground {
            background: #3b82f6 !important;
            opacity: 0.3 !important;
        }

        /* Make sure the focused state is visible */
        .code-container .cm-focused {
            outline: 2px solid #3b82f6 !important;
            outline-offset: 2px !important;
        }

        /* Code textbox styling */
        .code-container textarea {
            background: #ffffff !important;
            border: none !important;
            border-radius: 4px !important;
            font-family: 'SF Mono', 'Monaco', 'Inconsolata', 'Roboto Mono', 'Fira Code', monospace !important;
            font-size: 14px !important;
            line-height: 1.5 !important;
            padding: 1.5rem !important;
            color: #2d3748 !important;
            font-weight: 500 !important;
            resize: vertical !important;
            white-space: pre !important;
            overflow-wrap: normal !important;
            word-break: normal !important;
        }

        /* Enable soft wrapping for code content */
        .code-container .cm-content {
            white-space: pre-wrap !important;
            padding: 1.5rem !important;
            color: #5a3a2a !important;
            font-size: 14px !important;
            font-weight: 500 !important;
        }
        .code-container .cm-focused {
            outline: none !important;
            box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.3) !important;
        }
        .code-container .cm-line {
            padding-left: 0.5rem !important;
            white-space: pre-wrap !important;
            word-wrap: break-word !important;
            overflow-wrap: break-word !important;
        }

        /* Force wrapping ONLY - NO SCROLLING */
        .code-container .cm-editor {
            white-space: pre-wrap !important;
            overflow-x: hidden !important;
        }
        .code-container .cm-scroller {
            overflow-x: hidden !important;
            width: 100% !important;
        }
        .code-container .cm-editor .cm-content {
            white-space: pre-wrap !important;
            word-break: break-all !important;
            overflow-wrap: anywhere !important;
            width: 100% !important;
            max-width: 100% !important;
        }
        .code-container .cm-editor .cm-line {
            white-space: pre-wrap !important;
            word-break: break-all !important;
            overflow-wrap: anywhere !important;
            width: 100% !important;
            max-width: 100% !important;
            box-sizing: border-box !important;
        }

        /* Force the entire code container to have no horizontal overflow */
        .code-container,
        .code-container * {
            overflow-x: hidden !important;
            max-width: 100% !important;
        }

        /* Moderate syntax highlighting for Python */
        .code-container .cm-keyword { color: #7c3aed !important; }
        .code-container .cm-string { color: #059669 !important; }
        .code-container .cm-comment { color: #6b7280 !important; font-style: italic !important; }
        .code-container .cm-number { color: #dc2626 !important; }
        .code-container .cm-variable { color: #1e40af !important; }
        .code-container .cm-function { color: #7c2d12 !important; }
        .code-container .cm-operator { color: #374151 !important; }

        /* Paper Code header */
        .code-container label {
            background: #f5e6e0 !important;
            color: #5a3a2a !important;
            padding: 0.75rem 1.25rem !important;
            border-radius: 8px 8px 0 0 !important;
            font-weight: 700 !important;
            font-size: 1.1rem !important;
            margin: 0 !important;
            border: none !important;
            border-bottom: 1px solid #f0c5b8 !important;
            font-family: 'Georgia', serif !important;
        }

        /* Custom scrollbar for code area */
        .code-container .cm-scroller::-webkit-scrollbar {
            width: 6px !important;
            height: 6px !important;
        }
        .code-container .cm-scroller::-webkit-scrollbar-track {
            background: rgba(243, 244, 246, 0.8) !important;
            border-radius: 3px !important;
        }
        .code-container .cm-scroller::-webkit-scrollbar-thumb {
            background: rgba(156, 163, 175, 0.8) !important;
            border-radius: 3px !important;
        }
        .code-container .cm-scroller::-webkit-scrollbar-thumb:hover {
            background: rgba(107, 114, 128, 0.9) !important;
        }

        /* Paper Line numbers */
        .code-container .cm-lineNumbers {
            background: #f5e6e0 !important;
            color: #b8847a !important;
            border-right: 1px solid #f0c5b8 !important;
            padding-right: 0.5rem !important;
        }
        .code-container .cm-lineNumbers .cm-gutterElement {
            color: #b8847a !important;
            font-weight: 400 !important;
        }

        /* Paper Memory Cards */
        .memory-card {
            background: #fcf3f0 !important;
            border: 1px solid #f4e6e1 !important;
            border-radius: 12px !important;
            padding: 0.7rem !important;
            border-left: 4px solid #e8a491 !important;
        }

        /* Paper Labels */
        label {
            font-weight: 600 !important;
            color: #5a3a2a !important;
            font-size: 1.1rem !important;
            font-family: 'Georgia', serif !important;
        }

        /* Memory Analysis Spacing */
        .memory-analysis-spacing {
            padding-top: 1rem !important;
            border-top: 1px solid rgba(226, 232, 240, 0.6) !important;
        }

        /* FINAL OVERRIDE: Nuclear option for dropdown transparency */
        [role="listbox"] {
            background: white !important;
            opacity: 1 !important;
        }
        [role="listbox"] > * {
            background: white !important;
            opacity: 1 !important;
        }

        /* Gradio-specific nuclear option */
        .gradio-app [role="listbox"],
        .gradio-app [role="listbox"] > * {
            background: #ffffff !important;
            background-color: #ffffff !important;
            opacity: 1 !important;
        }

        /* Last resort: override all possible transparent backgrounds */
        div[style*="background"] {
            background: unset !important;
        }
        [role="listbox"][style*="background"] {
            background: #ffffff !important;
        }
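
        /* NOTE: the tags below deliberately close the <style> element that
           Gradio wraps this css string in, inject a <script> block, and then
           reopen <style> so Gradio's own closing tag still matches. This is a
           fragile hack that relies on the css value being inlined verbatim. */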
        </style>
        <script>
        // Fix CodeMirror text selection and accordion titles after the page loads
        document.addEventListener('DOMContentLoaded', function() {
            setTimeout(function() {
                // Find all CodeMirror editors
                const editors = document.querySelectorAll('.code-container .cm-editor');
                editors.forEach(editor => {
                    // Enable text selection on CodeMirror
                    editor.style.userSelect = 'text';
                    editor.style.webkitUserSelect = 'text';
                    editor.style.mozUserSelect = 'text';

                    // Find the content area
                    const content = editor.querySelector('.cm-content');
                    if (content) {
                        content.style.userSelect = 'text';
                        content.style.webkitUserSelect = 'text';
                        content.style.mozUserSelect = 'text';
                    }

                    // Add event listeners to ensure selection works
                    editor.addEventListener('mousedown', function(e) {
                        e.stopPropagation();
                    });
                });

                // Fix accordion title font sizes - focus on the first span only
                const accordionButtons = document.querySelectorAll('button.label-wrap, button[class*="label-wrap"]');
                accordionButtons.forEach(button => {
                    // Find and style the text span (first span child)
                    const textSpan = button.querySelector('span:first-child');
                    if (textSpan) {
                        textSpan.style.fontSize = '1.5rem';
                        textSpan.style.fontWeight = '900';
                        textSpan.style.fontFamily = 'Georgia, serif';
                        textSpan.style.color = '#2d1810';
                        textSpan.style.textShadow = '0 1px 2px rgba(0,0,0,0.2)';
                        textSpan.style.letterSpacing = '0.5px';
                        textSpan.style.lineHeight = '1.3';

                        // Force override any existing styles
                        textSpan.style.setProperty('font-size', '1.5rem', 'important');
                        textSpan.style.setProperty('font-weight', '900', 'important');
                        textSpan.style.setProperty('font-family', 'Georgia, serif', 'important');
                    }
                });
            }, 1000);
        });
        </script>
        <style>
        /* Specific accordion title styling - target the first span */
        button.label-wrap span:first-child,
        button.label-wrap.svelte-1w6vloh span:first-child,
        .label-wrap span:first-child,
        button.label-wrap > span:first-child,
        button[class*="label-wrap"] span:first-child {
            font-size: 1.5rem !important;
            font-weight: 900 !important;
            color: #2d1810 !important;
            font-family: 'Georgia', serif !important;
            line-height: 1.3 !important;
            text-shadow: 0 1px 2px rgba(0,0,0,0.2) !important;
            letter-spacing: 0.5px !important;
            text-transform: none !important;
        }

        /* Additional targeting for accordion buttons */
        button.label-wrap {
            background: #f9f0ec !important;
            padding: 0.7rem 1rem !important;
            border-radius: 8px !important;
            border: 1px solid #f0c5b8 !important;
        }
        button.label-wrap:hover {
            background: #f5e6e0 !important;
        }

        /* Accordion title styling - focus on readability */
        .gradio-accordion .label-wrap,
        .gradio-accordion summary,
        .gradio-accordion details summary,
        details summary,
        details > summary,
        [data-testid="accordion"] summary,
        [data-testid="accordion"] .label-wrap {
            font-weight: 900 !important;
            color: #2d1810 !important;
            font-family: 'Georgia', serif !important;
            line-height: 1.3 !important;
            text-shadow: 0 1px 2px rgba(0,0,0,0.1) !important;
            padding: 1rem 1.5rem !important;
            background: #f5f1e8 !important;
            border-radius: 8px !important;
            letter-spacing: 0.5px !important;
            text-transform: none !important;
        }

        /* Even more specific accordion targeting */
        .gradio-container details summary,
        .gradio-container .gradio-accordion summary,
        .gradio-container [role="button"] {
            font-weight: 900 !important;
            color: #2d1810 !important;
            font-family: 'Georgia', serif !important;
            text-shadow: 0 1px 2px rgba(0,0,0,0.1) !important;
            background: #f5f1e8 !important;
            padding: 1rem 1.5rem !important;
            border-radius: 8px !important;
            letter-spacing: 0.5px !important;
        }

        /* Accordion hover states for better interaction */
        .gradio-accordion summary:hover,
        details summary:hover {
            background: #f0ead7 !important;
            color: #1a0f08 !important;
        }

        /* Accordion container styling */
        .gradio-accordion,
        details {
            background: #fefdfb !important;
            border: 2px solid #f0ede6 !important;
            border-radius: 12px !important;
            margin-bottom: 1rem !important;
        }
        .gradio-accordion[open],
        details[open] {
            border-color: #e8dcc6 !important;
            background: #fefdfb !important;
        }

        /* Mobile Responsive Styles */
        @media (max-width: 768px) {
            body, html, main {
                padding: 5px !important;
                margin: 0 !important;
            }
            .gradio-container {
                padding: 5px !important;
                margin: 0 !important;
            }
            /* Set left/right margins to 1px for mobile */
            .gradio-container > * {
                margin-left: 1px !important;
                margin-right: 1px !important;
            }
            /* Adjust hero header for mobile */
            .hero-header {
                padding: 1rem 0.5rem !important;
                margin-bottom: 1rem !important;
            }
            .hero-header h1 {
                font-size: 1.8rem !important;
            }
            .hero-header h2 {
                font-size: 1rem !important;
            }
            .hero-header p {
                font-size: 0.9rem !important;
            }
            /* Mobile-friendly glass panels */
            .glass-panel {
                margin: 0.5rem 0 !important;
                padding: 1rem !important;
                border-radius: 12px !important;
            }
            /* Responsive button sizing */
            .primary-button {
                padding: 0.8rem 2rem !important;
                font-size: 1rem !important;
            }
            /* Mobile code container */
            .code-container {
                margin: 0 !important;
                border-radius: 8px !important;
            }
            /* Stack columns on mobile */
            .gradio-row {
                flex-direction: column !important;
            }
        }

        /* Small mobile devices */
        @media (max-width: 480px) {
            .main-container {
                margin: 0 1px !important;
                padding: 0.5rem !important;
            }
            .hero-header {
                padding: 1.5rem 0.5rem !important;
            }
            .hero-header h1 {
                font-size: 2rem !important;
            }
            .hero-header h2 {
                font-size: 1.2rem !important;
            }
            .glass-panel {
                padding: 0.8rem !important;
                margin: 0.25rem 0 !important;
            }
        }
        """
    ) as interface:
        with gr.Column(elem_classes="main-container"):
            # Ultra Premium Header
            gr.HTML("""
                <div class="hero-header" style="text-align: center; padding: 1rem;">
                    <h1 style="color: #5a3a2a; font-size: 2rem; margin: 0 0 0.2rem 0; font-weight: 600; font-family: 'Georgia', serif;">
                        ✨ Auto Diffusers Config
                    </h1>
                    <h2 style="color: #8b5a4f; font-size: 1rem; margin: 0 0 0.5rem 0; font-weight: 500; font-style: italic; font-family: 'Georgia', serif;">
                        Hardware-Optimized Code Generator
                    </h2>
                    <span style="display: inline-block; background: #fefcfa; padding: 0.3rem 0.8rem; border-radius: 8px; color: #5a3a2a; font-size: 0.85rem; border: 1px solid #f4e6e1; font-family: 'Georgia', serif; font-style: italic; font-weight: 500;">
                        🤖 Powered by Google Gemini 2.5
                    </span>
                </div>
            """)

            # Main Content Area
            # Hardware Selection Section
            with gr.Group(elem_classes="glass-card"):
                with gr.Accordion("⚙️ Hardware Specifications", open=True) as hardware_accordion:
                    gr.HTML("""
                        <div class="section-header" style="text-align: center;">
                            <p style="margin: 0; color: #6a443a; font-size: 1.1rem; font-weight: 600; font-family: 'Georgia', serif;">
                                Configure your system hardware for optimal code generation
                            </p>
                        </div>
                    """)

                    # Platform, VRAM, and RAM in a single row
                    with gr.Row():
                        platform = gr.Dropdown(
                            choices=["Linux", "Darwin", "Windows"],
                            label="🖥️ Platform",
                            value="Linux",
                            info="Your operating system"
                        )
                        vram_gb = gr.Number(
                            label="🎯 VRAM (GB)",
                            value=8,
                            minimum=0,
                            maximum=200,
                            info="GPU memory available"
                        )
                        ram_gb = gr.Number(
                            label="💻 System RAM (GB)",
                            value=16,
                            minimum=4,
                            maximum=256,
                            info="Total system memory"
                        )

                    # GPU configuration on separate lines
                    gpu_vendor = gr.Dropdown(
                        choices=[
                            "Custom (Manual Input)",
                            "NVIDIA Consumer (GeForce RTX)",
                            "NVIDIA Professional (RTX A-Series)",
                            "NVIDIA Data Center",
                            "Apple Silicon",
                            "AMD",
                            "Intel",
                            "CPU Only"
                        ],
                        label="🎮 GPU Vendor/Category",
                        value="Custom (Manual Input)",
                        info="Select your GPU category"
                    )
                    gpu_series = gr.Dropdown(
                        choices=[],
                        label="📊 GPU Series",
                        visible=False,
                        interactive=True,
                        info="Choose your GPU series"
                    )
                    gpu_model = gr.Dropdown(
                        choices=[],
                        label="🔧 GPU Model",
                        visible=False,
                        interactive=True,
                        info="Select your specific GPU model"
                    )
                    gpu_name = gr.Textbox(
                        label="Selected GPU",
                        visible=False
                    )

            # Model Configuration Section
            with gr.Group(elem_classes="glass-card"):
                with gr.Accordion("🤖 Model Configuration", open=True) as model_accordion:
                    gr.HTML("""
                        <div class="section-header" style="text-align: center;">
                            <p style="margin: 0; color: #6a443a; font-size: 1.1rem; font-weight: 600; font-family: 'Georgia', serif;">
                                Configure the AI model and generation parameters
                            </p>
                        </div>
                    """)

                    # Model Name - full width on its own row
                    model_name = gr.Textbox(
                        label="🏷️ Model Name",
                        value="black-forest-labs/FLUX.1-schnell",
                        placeholder="e.g., black-forest-labs/FLUX.1-schnell",
                        info="HuggingFace model identifier"
                    )

                    # Other parameters in a 4-column layout
                    with gr.Row():
                        width = gr.Number(
                            label="📏 Width (px)",
                            value=1360,
                            minimum=256,
                            maximum=2048,
                            step=64,
                            info="Image width"
                        )
                        height = gr.Number(
                            label="📐 Height (px)",
                            value=768,
                            minimum=256,
                            maximum=2048,
                            step=64,
                            info="Image height"
                        )
                        dtype_selection = gr.Dropdown(
                            choices=["Auto", "float32", "float16", "bfloat16"],
                            label="⚡ dtype",
                            value="Auto",
                            info="Precision mode"
                        )
                        inference_steps = gr.Number(
                            label="🔄 Inf. Steps",
                            value=4,
                            minimum=1,
                            maximum=50,
                            info="Denoising steps"
                        )

                    memory_analysis_output = gr.Markdown(
                        value="✨ Select a model and configure your hardware to see memory requirements and optimization recommendations.",
                        elem_classes="memory-analysis-spacing"
                    )

            # Generate Button
            with gr.Row():
                with gr.Column():
                    generate_btn = gr.Button(
                        "✨ Generate Optimized Code",
                        variant="primary",
                        size="lg",
                        elem_classes="generate-btn"
                    )

            # Generated Code Section
            with gr.Group(elem_classes="ultra-glass"):
                # Hidden button to show the prompt (only visible after generation)
                with gr.Row():
                    view_prompt_btn = gr.Button(
                        "🔍 View Actual Prompt Sent to Gemini",
                        variant="secondary",
                        size="sm",
                        visible=False,
                        elem_classes="view-prompt-btn"
                    )

                # Code output - Code component with improved text selection
                code_output = gr.Code(
                    label="Generated Code",
                    language="python",
                    lines=20,
                    interactive=True,
                    show_label=True,
                    elem_classes="code-container",
                    show_line_numbers=False,
                    wrap_lines=True,
                    value="# Your optimized diffusion code will appear here after generation\n# Click 'Generate Optimized Code' to create hardware-specific Python code\n\nprint('Ready to generate AI art with optimized performance!')"
                )

            # Modal with CSS styling
            with gr.Group(visible=False, elem_classes="modal-overlay") as prompt_modal:
                with gr.Column(elem_classes="modal-content"):
                    with gr.Row(elem_classes="modal-header"):
                        gr.Markdown("### 📋 Actual Prompt Sent to Gemini API")
                        close_modal_btn = gr.Button("×", variant="secondary", size="sm", elem_classes="modal-close-btn")
                    prompt_display = gr.Code(
                        label="Full Prompt",
                        language="markdown",
                        lines=20,
                        interactive=False,
                        show_label=False,
                        elem_classes="prompt-container",
                        value=""
                    )

        def on_gpu_vendor_change(vendor):
            """Handle GPU vendor selection and update the series dropdown."""
            if vendor == "Custom (Manual Input)":
                return (gr.update(visible=False, choices=[]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "CPU Only":
                return (gr.update(visible=False, choices=[]),
                        gr.update(visible=False, choices=[]),
                        "", 0)
            elif vendor == "NVIDIA Consumer (GeForce RTX)":
                return (gr.update(visible=True, choices=["RTX 50 Series", "RTX 40 Series", "RTX 30 Series"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "NVIDIA Professional (RTX A-Series)":
                return (gr.update(visible=True, choices=["RTX A6000 Series", "RTX A5000 Series", "RTX A4000 Series"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "NVIDIA Data Center":
                return (gr.update(visible=True, choices=["Blackwell (B-Series)", "Hopper (H-Series)", "Ada Lovelace (L-Series)", "Ampere (A-Series)", "Volta/Tesla"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "Apple Silicon":
                return (gr.update(visible=True, choices=["M4 Series", "M3 Series", "M2 Series", "M1 Series"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "AMD":
                return (gr.update(visible=True, choices=["Radeon RX 7000", "Radeon RX 6000", "Instinct MI Series"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            elif vendor == "Intel":
                return (gr.update(visible=True, choices=["Arc A-Series"]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())
            else:
                return (gr.update(visible=False, choices=[]),
                        gr.update(visible=False, choices=[]),
                        "", gr.update())

        def on_gpu_series_change(vendor, series):
            """Handle GPU series selection and update the model dropdown."""
            models = []
            if vendor == "NVIDIA Consumer (GeForce RTX)":
                if series == "RTX 50 Series":
                    models = ["RTX 5090 (32GB)", "RTX 5080 (16GB)", "RTX 5070 Ti (16GB)", "RTX 5070 (12GB)", "RTX 5060 Ti (16GB)", "RTX 5060 (12GB)"]
                elif series == "RTX 40 Series":
                    models = ["RTX 4090 (24GB)", "RTX 4080 Super (16GB)", "RTX 4070 Ti Super (16GB)", "RTX 4070 Super (12GB)", "RTX 4070 (12GB)", "RTX 4060 Ti (16GB)", "RTX 4060 Ti (8GB)", "RTX 4060 (8GB)"]
                elif series == "RTX 30 Series":
                    models = ["RTX 3090 Ti (24GB)", "RTX 3090 (24GB)", "RTX 3080 Ti (12GB)", "RTX 3080 (12GB)", "RTX 3080 (10GB)", "RTX 3070 Ti (8GB)", "RTX 3070 (8GB)", "RTX 3060 Ti (8GB)", "RTX 3060 (12GB)"]
            elif vendor == "NVIDIA Professional (RTX A-Series)":
                if series == "RTX A6000 Series":
                    models = ["RTX A6000 (48GB)", "RTX A6000 Ada (48GB)", "RTX 6000 Ada (48GB)"]
                elif series == "RTX A5000 Series":
                    models = ["RTX A5000 (24GB)", "RTX A5500 (24GB)", "RTX 5000 Ada (32GB)"]
                elif series == "RTX A4000 Series":
                    models = ["RTX A4000 (16GB)", "RTX A4500 (20GB)", "RTX 4000 Ada (20GB)", "RTX 4000 SFF Ada (20GB)"]
            elif vendor == "NVIDIA Data Center":
                if series == "Blackwell (B-Series)":
                    models = ["B200 (192GB)", "B100 (192GB)", "GB200 NVL72 (192GB per GPU)"]
                elif series == "Hopper (H-Series)":
                    models = ["H200 (141GB)", "H100 SXM (80GB)", "H100 PCIe (80GB)"]
                elif series == "Ada Lovelace (L-Series)":
                    models = ["L40S (48GB)", "L40 (48GB)", "L4 (24GB)"]
                elif series == "Ampere (A-Series)":
                    models = ["A100 SXM (80GB)", "A100 PCIe (80GB)", "A100 PCIe (40GB)", "A40 (48GB)", "A30 (24GB)", "A16 (16GB)", "A10 (24GB)"]
                elif series == "Volta/Tesla":
                    models = ["V100 SXM2 (32GB)", "V100 PCIe (16GB)", "P100 (16GB)"]
            elif vendor == "Apple Silicon":
                if series == "M4 Series":
                    models = ["M4 Max (128GB Unified)", "M4 Pro (64GB Unified)", "M4 (32GB Unified)"]
                elif series == "M3 Series":
                    models = ["M3 Ultra (192GB Unified)", "M3 Max (128GB Unified)", "M3 Pro (36GB Unified)", "M3 (24GB Unified)"]
                elif series == "M2 Series":
                    models = ["M2 Ultra (192GB Unified)", "M2 Max (96GB Unified)", "M2 Pro (32GB Unified)", "M2 (24GB Unified)"]
                elif series == "M1 Series":
                    models = ["M1 Ultra (128GB Unified)", "M1 Max (64GB Unified)", "M1 Pro (32GB Unified)", "M1 (16GB Unified)"]
            elif vendor == "AMD":
                if series == "Radeon RX 7000":
                    models = ["RX 7900 XTX (24GB)", "RX 7900 XT (20GB)"]
                elif series == "Radeon RX 6000":
                    models = ["RX 6900 XT (16GB)"]
                elif series == "Instinct MI Series":
                    models = ["Instinct MI300X (192GB)", "Instinct MI250X (128GB)", "Instinct MI100 (32GB)"]
            elif vendor == "Intel":
                if series == "Arc A-Series":
                    models = ["Arc A770 (16GB)", "Arc A750 (8GB)"]
            return gr.update(visible=True, choices=models)

        def on_gpu_model_change(model):
            """Handle GPU model selection and auto-fill the GPU name and VRAM."""
            if not model:
                return "", gr.update()

            # Extract the GPU name and VRAM from the model string
            if "(" in model and "GB" in model:
                gpu_name_part = model.split(" (")[0]
                vram_part = model.split("(")[1].split("GB")[0]
                try:
                    vram_value = int(vram_part)
                except ValueError:
                    vram_value = 8
                return gpu_name_part, vram_value
            else:
                return model, gr.update()
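
        # Example of the parsing above: "RTX 4090 (24GB)" -> ("RTX 4090", 24);
        # a model string without "(...GB)" is passed through unchanged.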

        def get_final_gpu_name(vendor, series, model):
            """Get the final GPU name based on the vendor selection."""
            if vendor == "Custom (Manual Input)":
                return "Custom GPU"
            elif vendor == "CPU Only":
                return ""
            elif model and "(" in model and "GB" in model:
                return model.split(" (")[0]
            elif model:
                return model
            else:
                return vendor

        def update_hardware_accordion_title(platform, gpu_vendor, gpu_model, vram_gb, ram_gb):
            """Update the hardware accordion title with the current configuration."""
            final_gpu = get_final_gpu_name(gpu_vendor, "", gpu_model)
            if not final_gpu:
                final_gpu = gpu_vendor if gpu_vendor != "Custom (Manual Input)" else "Custom GPU"

            # Extract GPU name and VRAM for cleaner display
            gpu_display = final_gpu
            if gpu_model and "(" in gpu_model and "GB" in gpu_model:
                # The model selection already includes the VRAM amount
                gpu_display = gpu_model
            elif final_gpu and vram_gb:
                gpu_display = f"{final_gpu} ({vram_gb}GB)"

            return f"⚙️ Hardware: {platform} | {gpu_display} | {ram_gb}GB RAM"

        def update_model_accordion_title(model_name, dtype_selection, width, height, inference_steps, memory_analysis_text=""):
            """Update the model accordion title with the current configuration, including memory info."""
            model_short = model_name.split("/")[-1] if "/" in model_name else model_name
            dtype_short = dtype_selection

            # Extract memory info for the title
            memory_info = ""
            if memory_analysis_text and not memory_analysis_text.startswith("Select a model") and "Error" not in memory_analysis_text:
                for line in memory_analysis_text.split('\n'):
                    if ("Memory Requirements:" in line or "estimated" in line.lower()) and "GB" in line:
                        gb_match = re.search(r'(\d+\.?\d*)\s*GB', line)
                        if gb_match:
                            memory_info = f" | {gb_match.group(1)}GB req"
                        break
            return f"🤖 Model: {model_short} | {dtype_short} | {width}×{height} | {inference_steps} steps{memory_info}"

        # State to track the last processed model name
        last_processed_model = gr.State(value="")

        def update_memory_analysis(model_name, vram_gb, last_model):
            """Update memory analysis based on the current selections."""
            if not model_name or not model_name.strip():
                return "Select a model to see memory requirements.", ""

            # Check if the model name has actually changed
            if model_name == last_model and last_model != "":
                # Return the current analysis without an API call
                return gr.update(), last_model

            if not vram_gb or vram_gb <= 0:
                return f"**Model:** {model_name}\n\nConfigure your GPU to see memory analysis.", model_name

            try:
                memory_info, recommendations, formatted_info = app.analyze_model_memory(model_name, vram_gb)

                # Check whether the memory analysis itself reported an error
                if isinstance(memory_info, dict) and 'error' in memory_info:
                    # Extract just the core error message
                    error_msg = str(memory_info['error'])
                    if "Error analyzing model memory:" in error_msg:
                        error_msg = error_msg.replace("Error analyzing model memory:", "").strip()
                    return f"**Note:** {error_msg} (API error)", model_name

                return formatted_info, model_name
            except Exception as e:
                # Simple error message for any other exceptions
                error_msg = str(e)
                return f"**Note:** {error_msg} (API error)", model_name

        # Connect GPU dropdown change handlers with memory analysis updates
        gpu_vendor.change(
            on_gpu_vendor_change,
            inputs=[gpu_vendor],
            outputs=[gpu_series, gpu_model, gpu_name, vram_gb]
        ).then(
            update_memory_analysis,
            inputs=[model_name, vram_gb, last_processed_model],
            outputs=[memory_analysis_output, last_processed_model]
        )
        gpu_series.change(
            on_gpu_series_change,
            inputs=[gpu_vendor, gpu_series],
            outputs=[gpu_model]
        )
        gpu_model.change(
            on_gpu_model_change,
            inputs=[gpu_model],
            outputs=[gpu_name, vram_gb]
        ).then(
            update_memory_analysis,
            inputs=[model_name, vram_gb, last_processed_model],
            outputs=[memory_analysis_output, last_processed_model]
        )

        # Update memory analysis when the model name is submitted (Enter) or
        # loses focus, or when the VRAM value changes
        model_name.submit(
            update_memory_analysis,
            inputs=[model_name, vram_gb, last_processed_model],
            outputs=[memory_analysis_output, last_processed_model]
        )
        model_name.blur(
            update_memory_analysis,
            inputs=[model_name, vram_gb, last_processed_model],
            outputs=[memory_analysis_output, last_processed_model]
        )
        vram_gb.change(
            update_memory_analysis,
            inputs=[model_name, vram_gb, last_processed_model],
            outputs=[memory_analysis_output, last_processed_model]
        )

        # Load initial memory analysis on startup
        interface.load(
            update_memory_analysis,
            inputs=[model_name, vram_gb, last_processed_model],
            outputs=[memory_analysis_output, last_processed_model]
        )

        # Wrapper functions that return gr.update for the accordion labels
        def update_hardware_accordion(platform, gpu_vendor, gpu_model, vram_gb, ram_gb):
            title = update_hardware_accordion_title(platform, gpu_vendor, gpu_model, vram_gb, ram_gb)
            return gr.update(label=title)

        def update_model_accordion(model_name, dtype_selection, width, height, inference_steps, memory_analysis_text=""):
            title = update_model_accordion_title(model_name, dtype_selection, width, height, inference_steps, memory_analysis_text)
            return gr.update(label=title)

        # Load initial accordion titles on startup
        interface.load(
            update_hardware_accordion,
            inputs=[platform, gpu_vendor, gpu_model, vram_gb, ram_gb],
            outputs=hardware_accordion
        )
        interface.load(
            update_model_accordion,
            inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
            outputs=model_accordion
        )

        # Hardware accordion title updates
        for component in [platform, gpu_vendor, gpu_model, vram_gb, ram_gb]:
            component.change(
                update_hardware_accordion,
                inputs=[platform, gpu_vendor, gpu_model, vram_gb, ram_gb],
                outputs=hardware_accordion
            )

        # Model accordion title updates (including memory analysis)
        model_name.submit(
            update_model_accordion,
            inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
            outputs=model_accordion
        )
        model_name.blur(
            update_model_accordion,
            inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
            outputs=model_accordion
        )
        for component in [dtype_selection, width, height, inference_steps]:
            component.change(
                update_model_accordion,
                inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
                outputs=model_accordion
            )

        # Update the model accordion when the memory analysis changes
        memory_analysis_output.change(
            update_model_accordion,
            inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
            outputs=model_accordion
        )

        def create_code_summary(generated_code, model_name, final_gpu_name, vram_gb):
            """Create a concise summary of the generated code."""
            if generated_code.startswith("Error"):
                return "❌ **Code Generation Failed** - See error details in the code output below."

            # Scan the generated code for the key optimizations it applies
            optimizations = []
            if "torch.float16" in generated_code or "fp16" in generated_code.lower():
                optimizations.append("FP16 precision")
            if "torch.bfloat16" in generated_code or "bf16" in generated_code.lower():
                optimizations.append("BF16 precision")
            if "enable_model_cpu_offload" in generated_code:
                optimizations.append("CPU offloading")
            if "enable_sequential_cpu_offload" in generated_code:
                optimizations.append("Sequential CPU offload")
            if "low_cpu_mem_usage=True" in generated_code:
                optimizations.append("Low CPU memory usage")
            if "torch.compile" in generated_code:
                optimizations.append("Torch compile")
            if "attention_slicing" in generated_code:
                optimizations.append("Attention slicing")
            if "vae_slicing" in generated_code:
                optimizations.append("VAE slicing")

            device = "CUDA" if "cuda" in generated_code else "MPS" if "mps" in generated_code else "CPU"

            summary = f"""
### ✅ Code Generated Successfully

**Model:** `{model_name}`
**Hardware:** {final_gpu_name} ({vram_gb}GB) - {device}
**Optimizations:** {', '.join(optimizations) if optimizations else 'Standard configuration'}

**Key Features:**
- Memory-optimized pipeline loading
- Hardware-specific device configuration
- Performance tuning for your GPU
- Ready-to-run diffusion code
"""
            return summary

        def strip_comments(code):
            """Remove all comments from the code for the collapsed view."""
            if not code:
                return code

            lines = code.split('\n')
            filtered_lines = []
            for line in lines:
                stripped = line.strip()
                # Skip comment-only lines and empty lines
                if stripped.startswith('#') or stripped == '':
                    continue
                # For lines with inline comments, keep only the code part
                if '#' in line and not stripped.startswith('#'):
                    code_part = line.split('#')[0].rstrip()
                    if code_part.strip():  # Only add if there's actual code
                        filtered_lines.append(code_part)
                else:
                    filtered_lines.append(line)
            return '\n'.join(filtered_lines)
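
        # Example: strip_comments("x = 1  # set x\n# done") -> "x = 1".
        # Caveat: the naive '#' split also truncates string literals containing
        # '#'. Note that strip_comments is not wired to any event below; it
        # appears intended for the collapsed view tracked by code_collapsed.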

        def generate_with_combined_gpu_name(gpu_vendor, gpu_series, gpu_model, vram_gb, ram_gb, platform, model_name, dtype_selection, width, height, inference_steps):
            """Generate code with the correct GPU name from the multi-level selection, including memory analysis."""
            final_gpu_name = get_final_gpu_name(gpu_vendor, gpu_series, gpu_model)

            # Constant prompt text
            prompt_text = "A cat holding a sign that says hello world"

            # STEP 1: Perform memory analysis BEFORE code generation
            memory_analysis_data = None
            memory_header = ""
            try:
                if model_name and vram_gb and vram_gb > 0:
                    memory_info, recommendations, _ = app.analyze_model_memory(model_name, vram_gb)

                    # Package memory analysis for the Gemini API
                    memory_analysis_data = {
                        'memory_info': memory_info,
                        'recommendations': recommendations
                    }

                    # Create a header for the generated code
                    def get_optimization_strategy(recommendations):
                        """Generate optimization strategy text based on recommendations."""
                        strategies = []
                        if recommendations.get('cpu_offload'):
                            strategies.append("CPU offloading")
                        if recommendations.get('sequential_offload'):
                            strategies.append("Sequential CPU offload")
                        if recommendations.get('attention_slicing'):
                            strategies.append("Attention slicing")
                        if recommendations.get('vae_slicing'):
                            strategies.append("VAE slicing")
                        precision = recommendations.get('recommended_precision', 'float16')
                        if precision:
                            strategies.append(f"{precision} precision")
                        if not strategies:
                            # No special optimizations needed
                            if recommendations.get('recommendations') and any('Full model can fit' in rec for rec in recommendations.get('recommendations', [])):
                                return "Full VRAM utilization with optimal performance"
                            else:
                                return "Standard optimization"
                        return ", ".join(strategies)

                    optimization_strategy = get_optimization_strategy(recommendations)
                    memory_header = f"""# Memory Analysis for {model_name}:
# GPU: {final_gpu_name if final_gpu_name else 'Not specified'} ({vram_gb}GB VRAM)
# Model Memory Requirements: {memory_info.get('estimated_inference_memory_fp16_gb', 'Unknown')} GB
# Recommendation: {', '.join(recommendations.get('recommendations', ['N/A']))}
# Optimization Strategy: {optimization_strategy}
"""
            except Exception as e:
                memory_header = f"""# Memory Analysis for {model_name}:
# GPU: {final_gpu_name if final_gpu_name else 'Not specified'} ({vram_gb}GB VRAM)
# Note: Memory analysis failed - {str(e)}
"""

            # STEP 2: Get the actual prompt that will be sent to Gemini
            actual_prompt = app.get_generation_prompt(
                model_name, prompt_text, (int(height), int(width)), int(inference_steps),
                {
                    'platform': platform,
                    'architecture': 'manual_input',
                    'cpu_count': 8,
                    'python_version': '3.11',
                    'cuda_available': 'nvidia' in final_gpu_name.lower() if final_gpu_name else False,
                    'mps_available': platform == 'Darwin' and 'apple' in final_gpu_name.lower() if final_gpu_name else False,
                    'torch_version': '2.0+',
                    'manual_input': True,
                    'ram_gb': int(ram_gb) if ram_gb else 16,
                    'user_dtype': f"torch.{dtype_selection}" if dtype_selection != "Auto" else None,
                    'gpu_info': [{'name': final_gpu_name, 'memory_mb': int(vram_gb) * 1024}] if final_gpu_name and vram_gb else None
                },
                optimization_profile="balanced"
            )

            # STEP 3: Generate the optimized code WITH the memory analysis information
            generated_code = app.generate_code_with_manual_specs(
                final_gpu_name, vram_gb, ram_gb, platform,
                model_name, prompt_text, dtype_selection, width, height, inference_steps,
                memory_analysis_data
            )

            # STEP 4: Prepend the memory analysis header to the generated code
            final_code = memory_header + generated_code if memory_header and not generated_code.startswith("Error") else generated_code

            # STEP 5: Create the code summary
            summary = create_code_summary(generated_code, model_name, final_gpu_name, vram_gb)

            return summary, final_code, actual_prompt

        # States for tracking the code view and storing the full code and prompt
        code_collapsed = gr.State(value=False)
        full_code_storage = gr.State(value="")
        prompt_storage = gr.State(value="")

        def generate_and_store_code(gpu_vendor, gpu_series, gpu_model, vram_gb, ram_gb, platform, model_name, dtype_selection, width, height, inference_steps):
            """Generate code and return it both for display and for storage."""
            summary, full_code, actual_prompt = generate_with_combined_gpu_name(
                gpu_vendor, gpu_series, gpu_model, vram_gb, ram_gb, platform,
                model_name, dtype_selection, width, height, inference_steps
            )
            # Show the view-prompt button after generation and close both accordions:
            # (display code, stored code, reset collapsed state, show button, prompt,
            #  close hardware accordion, close model accordion)
            return (full_code, full_code, False, gr.update(visible=True), actual_prompt,
                    gr.update(open=False), gr.update(open=False))

        # Modal functions with CSS class control
        def show_modal(prompt):
            return gr.update(visible=True, elem_classes="modal-overlay visible"), prompt

        def hide_modal():
            logger.debug("Close modal button clicked")
            return gr.update(visible=False, elem_classes="modal-overlay"), ""

        # Generate button click
        generate_btn.click(
            generate_and_store_code,
            inputs=[
                gpu_vendor, gpu_series, gpu_model, vram_gb, ram_gb, platform,
                model_name, dtype_selection, width, height, inference_steps
            ],
            outputs=[code_output, full_code_storage, code_collapsed, view_prompt_btn, prompt_storage, hardware_accordion, model_accordion]
        )

        # View prompt button click
        view_prompt_btn.click(
            show_modal,
            inputs=[prompt_storage],
            outputs=[prompt_modal, prompt_display]
        )

        # Close modal button click
        close_modal_btn.click(
            hide_modal,
            outputs=[prompt_modal, prompt_display]
        )

    return interface


def main():
    """Launch the Gradio application."""
    try:
        interface = create_gradio_interface()
        interface.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=True,
            show_error=True
        )
    except Exception as e:
        print(f"Error launching Gradio app: {e}")
        print("Make sure you have set GOOGLE_API_KEY in your .env file")


if __name__ == "__main__":
    main()