Upload folder using huggingface_hub
- .claude/settings.local.json +4 -1
- auto_diffusers.log +0 -0
- gradio_app.py +55 -36
.claude/settings.local.json
CHANGED

```diff
@@ -72,7 +72,10 @@
       "Bash(sed -i '' '1540s/.*/ print(\"Close modal button clicked!\") # Debug/' /Users/deep-diver/Developers/auto-diffusers/gradio_app.py)",
       "Bash(GRADIO_SERVER_PORT=7861 python gradio_app.py)",
       "Bash(lsof -ti:7860)",
-      "Bash(kill 46516 80145)"
+      "Bash(kill 46516 80145)",
+      "Bash(grep -n \"model_name\" gradio_app.py)",
+      "Bash(grep -n -A2 -B2 \"model_name\" gradio_app.py)",
+      "Bash(timeout 20 python gradio_app.py)"
     ],
     "deny": []
   },
```
auto_diffusers.log
CHANGED
The diff for this file is too large to render. See raw diff.
gradio_app.py
CHANGED
```diff
@@ -1224,37 +1224,38 @@ def create_gradio_interface():
             return f"🤖 Model: {model_short} | {dtype_short} | {width}×{height} | {inference_steps} steps{memory_info}"
 
 
-        def update_memory_analysis(model_name, vram_gb):
-            """Update memory analysis based on selections."""
+        # State to track last processed model name
+        last_processed_model = gr.State(value="")
+
+        def update_memory_analysis(model_name, vram_gb, last_model):
+            """Update memory analysis based on selections."""
             if not model_name or not model_name.strip():
-                return "Select a model to see memory requirements."
+                return "Select a model to see memory requirements.", ""
+
+            # Check if model name has actually changed
+            if model_name == last_model and last_model != "":
+                # Return current analysis without API call (model hasn't changed)
+                return gr.update(), last_model
 
             if not vram_gb or vram_gb <= 0:
-                return f"**Model:** {model_name}\n\nConfigure your GPU to see memory analysis."
+                return f"**Model:** {model_name}\n\nConfigure your GPU to see memory analysis.", model_name
 
             try:
                 memory_info, recommendations, formatted_info = app.analyze_model_memory(model_name, vram_gb)
-                return formatted_info
-            except Exception as e:
-                # Enhanced error reporting with full traceback
-                import traceback
-                error_details = traceback.format_exc()
-                print(f"Memory analysis error for {model_name}: {error_details}")
 
-                # …
-                error_msg = str(e)
-                if …:
-                    detailed_error = f"❌ **Access Denied**\n\nModel: `{model_name}`\n\n**Issue:** This model is private or requires authentication.\n\n**Suggestion:** Use a public model or check access permissions."
-                elif "timeout" in error_msg.lower():
-                    detailed_error = f"❌ **Timeout Error**\n\nModel: `{model_name}`\n\n**Issue:** HuggingFace API is slow or unresponsive.\n\n**Suggestion:** Try again in a moment."
-                else:
-                    detailed_error = f"❌ **Memory Analysis Error**\n\nModel: `{model_name}`\n\n**Error Type:** {type(e).__name__}\n\n**Details:** {error_msg}\n\n**Full Error:**\n```\n{error_details}\n```"
-
-                return detailed_error
+                # Check if there was an error in the memory analysis
+                if isinstance(memory_info, dict) and 'error' in memory_info:
+                    # Extract just the core error message
+                    error_msg = str(memory_info['error'])
+                    if "Error analyzing model memory:" in error_msg:
+                        error_msg = error_msg.replace("Error analyzing model memory:", "").strip()
+                    return f"**Note:** {error_msg} (API error)", model_name
+
+                return formatted_info, model_name
+            except Exception as e:
+                # Simple error message for any other exceptions
+                error_msg = str(e)
+                return f"**Note:** {error_msg} (API error)", model_name
 
         # Connect GPU dropdown change handlers with memory analysis updates
         gpu_vendor.change(
```
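The core of this commit is the rewritten `update_memory_analysis`: a `gr.State` value is threaded through every handler's inputs and outputs, so the function can return `gr.update()` (a no-op for the output component) and skip the HuggingFace API call whenever the model name has not changed. Below is a minimal, self-contained sketch of that pattern; `expensive_lookup` is a hypothetical stand-in for `app.analyze_model_memory`, not the repo's code.

```python
# Minimal sketch of the gr.State caching pattern used in this commit.
# `expensive_lookup` is a stand-in for a slow HuggingFace Hub call.
import gradio as gr

def expensive_lookup(name: str) -> str:
    return f"analysis for {name}"   # placeholder result

def analyze(name, last_name):
    if not name or not name.strip():
        return "Select a model.", ""
    if name == last_name and last_name != "":
        # gr.update() with no arguments leaves the output component
        # unchanged, so no lookup happens for a repeated model name.
        return gr.update(), last_name
    return expensive_lookup(name), name

with gr.Blocks() as demo:
    last = gr.State(value="")        # persists across events, per session
    name = gr.Textbox(label="Model")
    out = gr.Markdown()
    # submit/blur instead of change: fire once per edit, not per keystroke
    name.submit(analyze, inputs=[name, last], outputs=[out, last])
    name.blur(analyze, inputs=[name, last], outputs=[out, last])

demo.launch()
```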
```diff
@@ -1263,8 +1264,8 @@ def create_gradio_interface():
             outputs=[gpu_series, gpu_model, gpu_name, vram_gb]
         ).then(
             update_memory_analysis,
-            inputs=[model_name, vram_gb],
-            outputs=memory_analysis_output
+            inputs=[model_name, vram_gb, last_processed_model],
+            outputs=[memory_analysis_output, last_processed_model]
         )
 
         gpu_series.change(
```
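The GPU handlers chain the analysis with `.then()`, which runs a second function only after the first finishes, so `update_memory_analysis` reads the `vram_gb` value the first step just wrote. A runnable sketch of that chaining; `on_vendor_change` and its VRAM table are hypothetical stand-ins for the real dropdown handler:

```python
# Sketch of the .then() chaining pattern: the second handler runs after
# the first completes and therefore sees the freshly updated vram_gb.
import gradio as gr

def on_vendor_change(vendor):
    # Stand-in: the real handler repopulates series/model dropdowns too.
    vram = {"NVIDIA": 24, "AMD": 16}.get(vendor, 8)
    return f"{vendor} GPU", vram

def update_memory_analysis(model, vram, last):
    return f"{model}: must fit in {vram} GB", model

with gr.Blocks() as demo:
    last_processed_model = gr.State(value="")
    model_name = gr.Textbox(value="stabilityai/sd-turbo", label="Model")
    gpu_vendor = gr.Dropdown(choices=["NVIDIA", "AMD"], label="Vendor")
    gpu_name = gr.Textbox(label="GPU")
    vram_gb = gr.Number(label="VRAM (GB)")
    memory_analysis_output = gr.Markdown()

    gpu_vendor.change(
        on_vendor_change, inputs=gpu_vendor, outputs=[gpu_name, vram_gb]
    ).then(
        update_memory_analysis,
        inputs=[model_name, vram_gb, last_processed_model],
        outputs=[memory_analysis_output, last_processed_model],
    )

demo.launch()
```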
```diff
@@ -1279,29 +1280,35 @@ def create_gradio_interface():
             outputs=[gpu_name, vram_gb]
         ).then(
             update_memory_analysis,
-            inputs=[model_name, vram_gb],
-            outputs=memory_analysis_output
+            inputs=[model_name, vram_gb, last_processed_model],
+            outputs=[memory_analysis_output, last_processed_model]
         )
 
 
-        # Update memory analysis when model name or VRAM changes
-        model_name.change(
+        # Update memory analysis when model name is submitted (Enter) or loses focus, or VRAM changes
+        model_name.submit(
+            update_memory_analysis,
+            inputs=[model_name, vram_gb, last_processed_model],
+            outputs=[memory_analysis_output, last_processed_model]
+        )
+
+        model_name.blur(
             update_memory_analysis,
-            inputs=[model_name, vram_gb],
-            outputs=memory_analysis_output
+            inputs=[model_name, vram_gb, last_processed_model],
+            outputs=[memory_analysis_output, last_processed_model]
         )
 
         vram_gb.change(
             update_memory_analysis,
-            inputs=[model_name, vram_gb],
-            outputs=memory_analysis_output
+            inputs=[model_name, vram_gb, last_processed_model],
+            outputs=[memory_analysis_output, last_processed_model]
         )
 
         # Load initial memory analysis on startup
         interface.load(
             update_memory_analysis,
-            inputs=[model_name, vram_gb],
-            outputs=memory_analysis_output
+            inputs=[model_name, vram_gb, last_processed_model],
+            outputs=[memory_analysis_output, last_processed_model]
         )
 
         # Create wrapper functions that return gr.update for accordion labels
```
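The other behavioral change in this hunk: `model_name.change(...)` is replaced by `model_name.submit(...)` plus `model_name.blur(...)`. On a `gr.Textbox`, `change` fires on every keystroke, so each typed character previously triggered a Hub API call; `submit` fires once on Enter and `blur` once when the field loses focus. A tiny demo of the three triggers (the `echo` handler is a stand-in):

```python
# Contrast of Textbox event triggers: .change fires per keystroke,
# .submit once on Enter, .blur once when focus leaves the field.
import gradio as gr

def echo(text):
    print("handler ran")   # one server-log line per trigger firing
    return text

with gr.Blocks() as demo:
    box = gr.Textbox(label="Model name")
    out = gr.Markdown()
    box.submit(echo, inputs=box, outputs=out)    # once per Enter
    box.blur(echo, inputs=box, outputs=out)      # once per focus loss
    # box.change(echo, inputs=box, outputs=out)  # would fire per keystroke

demo.launch()
```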
```diff
@@ -1337,7 +1344,19 @@ def create_gradio_interface():
         )
 
         # Model accordion title updates (including memory analysis)
-        for component in [model_name, dtype_selection, width, height, inference_steps]:
+        model_name.submit(
+            update_model_accordion,
+            inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
+            outputs=model_accordion
+        )
+
+        model_name.blur(
+            update_model_accordion,
+            inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
+            outputs=model_accordion
+        )
+
+        for component in [dtype_selection, width, height, inference_steps]:
             component.change(
                 update_model_accordion,
                 inputs=[model_name, dtype_selection, width, height, inference_steps, memory_analysis_output],
```
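The same submit/blur treatment is applied to the accordion-title handler: `model_name` leaves the `.change` loop, while the remaining components keep per-change updates. Per the comment above this block, `update_model_accordion` returns a `gr.update` so only the accordion's label changes. A sketch of that mechanism; the component values and label format here are illustrative, not the repo's exact strings:

```python
# Sketch of retitling an accordion via gr.update(label=...): the accordion
# is the event output, and only its label is touched, not its contents.
import gradio as gr

def update_model_accordion(model, dtype, width, height, steps, memory_md):
    label = f"🤖 {model} | {dtype} | {width}×{height} | {steps} steps"
    return gr.update(label=label)

with gr.Blocks() as demo:
    with gr.Accordion("Model", open=True) as model_accordion:
        model = gr.Textbox(value="stabilityai/sd-turbo", label="Model")
        dtype = gr.Dropdown(["float16", "bfloat16"], value="float16")
        width = gr.Slider(256, 2048, value=1024, step=64, label="Width")
        height = gr.Slider(256, 2048, value=1024, step=64, label="Height")
        steps = gr.Slider(1, 100, value=28, step=1, label="Steps")
    memory_md = gr.Markdown()

    for comp in [dtype, width, height, steps]:
        comp.change(
            update_model_accordion,
            inputs=[model, dtype, width, height, steps, memory_md],
            outputs=model_accordion,
        )

demo.launch()
```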