Spaces:
Runtime error
lionelgarnier committed
Commit · 346d9f6 · 1 Parent(s): 008680f
uncomment 1
app.py CHANGED
@@ -205,7 +205,7 @@ def generate_image(prompt, seed=DEFAULT_SEED,
             progress(1.0, desc="Complete")
             return image, f"Image generated successfully with seed {seed}"
         except Exception as e:
-            print(f"Error in
+            print(f"Error in generate_image: {str(e)}")
             return None, f"Error generating image: {str(e)}"


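The single changed line above completes the truncated print call, so the except branch logs the failure and then hands Gradio a (None, error message) pair instead of raising. A minimal stand-alone reduction of that pattern is sketched below; apart from generate_image itself, the body and names are illustrative, not the Space's actual code.

def generate_image(prompt: str, seed: int = 0):
    # Reduced sketch of the handler's error path shown in the hunk above.
    try:
        image = ...  # the real image-generation call lives here in app.py
        return image, f"Image generated successfully with seed {seed}"
    except Exception as e:
        # Log to the Space's console, then return a benign value so the UI
        # can surface the status string instead of an unhandled exception.
        print(f"Error in generate_image: {str(e)}")
        return None, f"Error generating image: {str(e)}"
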
@@ -251,44 +251,44 @@ def preload_models():
     return success, status


-   [removed lines of this hunk were not captured in this view]
+def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
+    return {
+        'gaussian': {
+            **gs.init_params,
+            '_xyz': gs._xyz.cpu().numpy(),
+            '_features_dc': gs._features_dc.cpu().numpy(),
+            '_scaling': gs._scaling.cpu().numpy(),
+            '_rotation': gs._rotation.cpu().numpy(),
+            '_opacity': gs._opacity.cpu().numpy(),
+        },
+        'mesh': {
+            'vertices': mesh.vertices.cpu().numpy(),
+            'faces': mesh.faces.cpu().numpy(),
+        },
+    }


+def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
+    gs = Gaussian(
+        aabb=state['gaussian']['aabb'],
+        sh_degree=state['gaussian']['sh_degree'],
+        mininum_kernel_size=state['gaussian']['mininum_kernel_size'],
+        scaling_bias=state['gaussian']['scaling_bias'],
+        opacity_bias=state['gaussian']['opacity_bias'],
+        scaling_activation=state['gaussian']['scaling_activation'],
+    )
+    gs._xyz = torch.tensor(state['gaussian']['_xyz'], device='cuda')
+    gs._features_dc = torch.tensor(state['gaussian']['_features_dc'], device='cuda')
+    gs._scaling = torch.tensor(state['gaussian']['_scaling'], device='cuda')
+    gs._rotation = torch.tensor(state['gaussian']['_rotation'], device='cuda')
+    gs._opacity = torch.tensor(state['gaussian']['_opacity'], device='cuda')

+    mesh = edict(
+        vertices=torch.tensor(state['mesh']['vertices'], device='cuda'),
+        faces=torch.tensor(state['mesh']['faces'], device='cuda'),
+    )

+    return gs, mesh


 # @spaces.GPU
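The pack_state / unpack_state pair added in this hunk serializes the GPU-resident Gaussian and mesh into a dict of CPU NumPy arrays, something Gradio can keep in gr.State between events, and later rebuilds CUDA tensors from that dict. (Note that the committed annotation declares Tuple[Gaussian, edict, str] while the function returns two values.) A hedged usage sketch follows; the handler names and the run_pipeline helper are assumptions for illustration, not part of this commit.

# Illustrative sketch only: run_pipeline and the handler names are invented;
# pack_state, unpack_state, Gaussian and MeshExtractResult come from the diff above.
def generate_3d(image, seed):
    gs, mesh = run_pipeline(image, seed)   # assumed helper returning GPU-resident objects
    # pack_state() copies everything to CPU NumPy, so the resulting plain dict can
    # sit in output_buf = gr.State() between Gradio events without holding GPU memory.
    return pack_state(gs, mesh)

def export_mesh(state):
    # unpack_state() rebuilds the Gaussian and an edict mesh on the CUDA device
    # before any GPU postprocessing.
    gs, mesh = unpack_state(state)
    return gs, mesh
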
@@ -512,7 +512,6 @@ def create_interface():

         output_buf = gr.State()

-        # Examples section - simplified version that only updates the prompt fields
         gr.Examples(
             examples=examples,
             fn=process_example_pipeline,
@@ -521,7 +520,6 @@ def create_interface():
             cache_examples=True,
         )

-        # Event handlers - Fixed to use the renamed components
         gr.on(
             triggers=[prompt_button.click, prompt.submit],
             fn=refine_prompt,
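The two create_interface hunks only remove inline comments; the wiring they touch is the standard Gradio pattern of binding one callback to several triggers. A self-contained sketch of that pattern is below. Component names mirror the diff, but refine_prompt here is a stand-in, not the Space's implementation.

import gradio as gr

def refine_prompt(text: str) -> str:
    # Stand-in for the Space's real prompt-refinement call.
    return text.strip()

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    prompt_button = gr.Button("Refine prompt")
    refined = gr.Textbox(label="Refined prompt")

    # One handler bound to both the button click and Enter in the textbox,
    # matching gr.on(triggers=[prompt_button.click, prompt.submit], ...) above.
    gr.on(
        triggers=[prompt_button.click, prompt.submit],
        fn=refine_prompt,
        inputs=prompt,
        outputs=refined,
    )

if __name__ == "__main__":
    demo.launch()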