Spaces: Runtime error

lionelgarnier committed · Commit 34046eb · 1 Parent(s): 461fc5b

remove model preloading functionality and related code
app.py CHANGED

@@ -26,7 +26,6 @@ login(token=hf_token)
 # Global constants and default values
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
-PRELOAD_MODELS = True # Set to True to preload models at startup
 
 # Default system prompt for text generation
 DEFAULT_SYSTEM_PROMPT = """You are a product designer with strong knowledge in text-to-image generation. You will receive a product request in the form of a brief description, and your mission will be to imagine a new product design that meets this need.
@@ -244,35 +243,6 @@ css="""
 }
 """
 
-def preload_models():
-    print("Preloading models...")
-    text_success = get_text_gen_pipeline() is not None
-    image_success = get_image_gen_pipeline() is not None
-    trellis_success = get_trellis_pipeline() is not None
-
-    success = text_success and image_success and trellis_success
-
-    status_parts = []
-    if text_success:
-        status_parts.append("Mistral ✓")
-    else:
-        status_parts.append("Mistral ✗")
-
-    if image_success:
-        status_parts.append("Flux ✓")
-    else:
-        status_parts.append("Flux ✗")
-
-    if trellis_success:
-        status_parts.append("Trellis ✓")
-    else:
-        status_parts.append("Trellis ✗")
-
-    status = f"Models loaded: {', '.join(status_parts)}"
-    print(status)
-    return success, status
-
-
 def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
     return {
         'gaussian': {
@@ -370,12 +340,7 @@ def process_example_pipeline(example_prompt):
     return example_prompt
 
 def create_interface():
-
-    if PRELOAD_MODELS:
-        model_success, model_status_details = preload_models()
-        model_status = f"✅ {model_status_details}" if model_success else f"⚠️ {model_status_details}"
-    else:
-        model_status = "ℹ️ Models will be loaded on demand"
+    model_status = "ℹ️ Models will be loaded on demand"
 
     with gr.Blocks(css=css) as demo:
         # Move session handlers INSIDE the Blocks context
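
With preloading removed, each pipeline is fetched only when its getter is first called. The deleted preload_models() checks whether get_text_gen_pipeline(), get_image_gen_pipeline(), and get_trellis_pipeline() return None, which points to a cached, lazy getter pattern. Below is a minimal sketch of what such a getter could look like for the Flux image pipeline; the model id, dtype, and error handling are assumptions for illustration, not the actual app.py implementation.

# Hypothetical lazy-loading getter, not the actual app.py code.
import torch
from diffusers import FluxPipeline

_image_pipe = None  # module-level cache so the model is loaded at most once


def get_image_gen_pipeline():
    """Return the Flux image pipeline, loading it on first use."""
    global _image_pipe
    if _image_pipe is None:
        try:
            # Model id and dtype are assumptions for this sketch.
            pipe = FluxPipeline.from_pretrained(
                "black-forest-labs/FLUX.1-dev",
                torch_dtype=torch.bfloat16,
            )
            pipe.to("cuda" if torch.cuda.is_available() else "cpu")
            _image_pipe = pipe
        except Exception as exc:
            # Callers (like the removed preload_models) treat None as "not loaded".
            print(f"Failed to load Flux pipeline: {exc}")
            return None
    return _image_pipe

The trade-off of dropping PRELOAD_MODELS is a faster, less memory-hungry startup for the Space, at the cost of the first generation request paying the full model-load latency.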