Spaces:
Running
on
Zero
Running
on
Zero
Logging in app
Browse files
app.py
CHANGED
@@ -16,18 +16,33 @@ from trellis.utils import render_utils, postprocessing_utils
|
|
16 |
|
17 |
import traceback
|
18 |
import sys
|
|
|
|
|
|
|
19 |
|
20 |
MAX_SEED = np.iinfo(np.int32).max
|
21 |
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
|
22 |
os.makedirs(TMP_DIR, exist_ok=True)
|
23 |
|
24 |
def start_session(req: gr.Request):
|
25 |
-
|
|
|
|
|
26 |
os.makedirs(user_dir, exist_ok=True)
|
27 |
|
28 |
def end_session(req: gr.Request):
|
29 |
-
|
30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
|
32 |
def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
|
33 |
return {
|
@@ -68,7 +83,9 @@ def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
|
|
68 |
return gs, mesh
|
69 |
|
70 |
def get_seed(randomize_seed: bool, seed: int) -> int:
|
71 |
-
|
|
|
|
|
72 |
|
73 |
@spaces.GPU
|
74 |
def text_to_3d(
|
@@ -80,7 +97,10 @@ def text_to_3d(
|
|
80 |
slat_sampling_steps: int,
|
81 |
req: gr.Request,
|
82 |
) -> Tuple[dict, str]:
|
83 |
-
|
|
|
|
|
|
|
84 |
outputs = pipeline.run(
|
85 |
prompt,
|
86 |
seed=seed,
|
@@ -94,13 +114,17 @@ def text_to_3d(
|
|
94 |
"cfg_strength": slat_guidance_strength,
|
95 |
},
|
96 |
)
|
|
|
|
|
97 |
video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
|
98 |
video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
|
99 |
video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
|
100 |
video_path = os.path.join(user_dir, 'sample.mp4')
|
101 |
imageio.mimsave(video_path, video, fps=15)
|
|
|
102 |
state = pack_state(outputs['gaussian'][0], outputs['mesh'][0])
|
103 |
torch.cuda.empty_cache()
|
|
|
104 |
return state, video_path
|
105 |
|
106 |
@spaces.GPU(duration=90)
|
@@ -110,12 +134,17 @@ def extract_glb(
|
|
110 |
texture_size: int,
|
111 |
req: gr.Request,
|
112 |
) -> Tuple[str, str]:
|
113 |
-
|
|
|
|
|
|
|
114 |
gs, mesh = unpack_state(state)
|
115 |
glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
|
116 |
glb_path = os.path.join(user_dir, 'sample.glb')
|
117 |
glb.export(glb_path)
|
|
|
118 |
torch.cuda.empty_cache()
|
|
|
119 |
return glb_path, glb_path
|
120 |
|
121 |
@spaces.GPU
|
|
|
16 |
|
17 |
import traceback
|
18 |
import sys
|
19 |
+
import logging
|
20 |
+
|
21 |
+
# Configure root logging once at import time; the HF_SPACE tag makes these
# entries easy to grep in the Space's container logs.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - HF_SPACE - %(levelname)s - %(message)s')

# Largest value accepted as an RNG seed (fits in a signed 32-bit int).
MAX_SEED = np.iinfo(np.int32).max
# Per-session scratch space lives under <app dir>/tmp; one subdirectory per
# Gradio session hash is created in start_session() and removed in end_session().
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
os.makedirs(TMP_DIR, exist_ok=True)
|
26 |
|
27 |
def start_session(req: gr.Request):
    """Create the per-session scratch directory when a Gradio session opens.

    Args:
        req: Incoming Gradio request; its ``session_hash`` names the
            subdirectory of ``TMP_DIR`` used by this session.
    """
    sid = str(req.session_hash)
    session_dir = os.path.join(TMP_DIR, sid)
    logging.info(f"START SESSION: Creando directorio para la sesi贸n {sid} en {session_dir}")
    # Idempotent: a repeated start for the same session is harmless.
    os.makedirs(session_dir, exist_ok=True)
|
32 |
|
33 |
def end_session(req: gr.Request):
    """Best-effort removal of the session's scratch directory on disconnect.

    Args:
        req: Gradio request whose ``session_hash`` identifies which
            ``TMP_DIR`` subdirectory to delete.
    """
    sid = str(req.session_hash)
    session_dir = os.path.join(TMP_DIR, sid)
    logging.info(f"END SESSION: Intentando eliminar el directorio de la sesi贸n {sid} en {session_dir}")
    # Guard clause: the directory may already have been cleaned up elsewhere.
    if not os.path.exists(session_dir):
        logging.warning(f"El directorio de la sesi贸n {sid} no fue encontrado al intentar eliminarlo. Es posible que ya haya sido limpiado.")
        return
    try:
        shutil.rmtree(session_dir)
        logging.info(f"Directorio de la sesi贸n {sid} eliminado correctamente.")
    except Exception as e:
        # Cleanup must never propagate an error to the UI; log and move on.
        logging.error(f"Error al eliminar el directorio de la sesi贸n {sid}: {e}")
|
46 |
|
47 |
def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
|
48 |
return {
|
|
|
83 |
return gs, mesh
|
84 |
|
85 |
def get_seed(randomize_seed: bool, seed: int) -> int:
    """Resolve the RNG seed for a generation run.

    Args:
        randomize_seed: When True, draw a fresh seed in ``[0, MAX_SEED)``;
            otherwise keep the caller-supplied value.
        seed: Seed to use when randomization is disabled.

    Returns:
        The seed that will actually be used (also logged for traceability).
    """
    if randomize_seed:
        chosen = np.random.randint(0, MAX_SEED)
    else:
        chosen = seed
    logging.info(f"Usando seed: {chosen}")
    return chosen
|
89 |
|
90 |
@spaces.GPU
|
91 |
def text_to_3d(
|
|
|
97 |
slat_sampling_steps: int,
|
98 |
req: gr.Request,
|
99 |
) -> Tuple[dict, str]:
|
100 |
+
session_hash = str(req.session_hash)
|
101 |
+
logging.info(f"[{session_hash}] Iniciando text_to_3d con prompt: '{prompt[:50]}...'")
|
102 |
+
user_dir = os.path.join(TMP_DIR, session_hash)
|
103 |
+
|
104 |
outputs = pipeline.run(
|
105 |
prompt,
|
106 |
seed=seed,
|
|
|
114 |
"cfg_strength": slat_guidance_strength,
|
115 |
},
|
116 |
)
|
117 |
+
|
118 |
+
logging.info(f"[{session_hash}] Generaci贸n del modelo completada. Renderizando video...")
|
119 |
video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
|
120 |
video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
|
121 |
video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
|
122 |
video_path = os.path.join(user_dir, 'sample.mp4')
|
123 |
imageio.mimsave(video_path, video, fps=15)
|
124 |
+
|
125 |
state = pack_state(outputs['gaussian'][0], outputs['mesh'][0])
|
126 |
torch.cuda.empty_cache()
|
127 |
+
logging.info(f"[{session_hash}] Video renderizado y estado empaquetado. Devolviendo: {video_path}")
|
128 |
return state, video_path
|
129 |
|
130 |
@spaces.GPU(duration=90)
|
|
|
134 |
texture_size: int,
|
135 |
req: gr.Request,
|
136 |
) -> Tuple[str, str]:
|
137 |
+
session_hash = str(req.session_hash)
|
138 |
+
logging.info(f"[{session_hash}] Iniciando extract_glb...")
|
139 |
+
user_dir = os.path.join(TMP_DIR, session_hash)
|
140 |
+
|
141 |
gs, mesh = unpack_state(state)
|
142 |
glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
|
143 |
glb_path = os.path.join(user_dir, 'sample.glb')
|
144 |
glb.export(glb_path)
|
145 |
+
|
146 |
torch.cuda.empty_cache()
|
147 |
+
logging.info(f"[{session_hash}] GLB extra铆do. Devolviendo: {glb_path}")
|
148 |
return glb_path, glb_path
|
149 |
|
150 |
@spaces.GPU
|