JiantaoLin committed · Commit 98297e9 · Parent(s): 0182054

new

Files changed:
- app.py (+0 -1)
- pipeline/kiss3d_wrapper.py (+1 -0)
- pipeline/pipeline_config/default.yaml (+5 -5)
app.py CHANGED
@@ -149,7 +149,6 @@ def text_to_detailed(prompt, seed=None):
     # print(f"Before text_to_detailed: {torch.cuda.memory_allocated() / 1024**3} GB")
     return k3d_wrapper.get_detailed_prompt(prompt, seed)
 
-@spaces.GPU
 def text_to_image(prompt, seed=None, strength=1.0,lora_scale=1.0, num_inference_steps=30, redux_hparam=None, init_image=None, **kwargs):
     # print(f"Before text_to_image: {torch.cuda.memory_allocated() / 1024**3} GB")
     k3d_wrapper.renew_uuid()
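The only change to app.py is dropping the `@spaces.GPU` decorator from `text_to_image`. On Hugging Face ZeroGPU Spaces, `spaces.GPU` marks a function whose execution needs a GPU attached; outside ZeroGPU it is effectively a no-op. A minimal sketch of the pattern (the `run_model` function and placeholder model are hypothetical, not from this repo):

```python
# Sketch of the ZeroGPU decorator pattern this hunk removes.
# `spaces.GPU` requests a GPU for the duration of the decorated call
# on a ZeroGPU Space; `duration` (seconds) is optional.
import spaces
import torch

model = torch.nn.Linear(4, 4)  # hypothetical placeholder model

@spaces.GPU(duration=60)
def run_model(x: torch.Tensor) -> torch.Tensor:
    # CUDA work must happen inside a GPU-decorated function on ZeroGPU
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return model.to(device)(x.to(device))
```

A plausible reading, given the companion change below, is that GPU allocation for this path is already handled by the wrapper's own `@spaces.GPU`-decorated methods (e.g. `generate_3d_bundle_image_text` in pipeline/kiss3d_wrapper.py), making the decorator on `text_to_image` redundant; the commit itself does not state the motivation.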
pipeline/kiss3d_wrapper.py CHANGED
@@ -462,6 +462,7 @@ class kiss3d_wrapper(object):
         logger.info(f'Save image to {save_path}')
 
         return preprocessed
+
     @spaces.GPU
     def generate_3d_bundle_image_text(self,
                                       prompt,
pipeline/pipeline_config/default.yaml CHANGED
@@ -6,7 +6,7 @@ flux:
   redux: "black-forest-labs/FLUX.1-Redux-dev"
   num_inference_steps: 20
   seed: 42
-  device: 'cuda
+  device: 'cuda'
 
 multiview:
   base_model: "sudo-ai/zero123plus-v1.2"
@@ -14,20 +14,20 @@ multiview:
   unet: "./checkpoint/zero123++/flexgen_19w.ckpt"
   num_inference_steps: 50
   seed: 42
-  device: 'cuda
+  device: 'cuda'
 
 reconstruction:
   model_config: "./models/lrm/config/PRM_inference.yaml"
   base_model: "./checkpoint/lrm/final_ckpt.ckpt"
-  device: 'cuda
+  device: 'cuda'
 
 caption:
   base_model: "multimodalart/Florence-2-large-no-flash-attn"
-  device: 'cuda
+  device: 'cuda'
 
 llm:
   base_model: "Qwen/Qwen2-7B-Instruct"
-  device: 'cuda
+  device: 'cuda'
 
 use_zero_gpu: false # for huggingface demo only
 3d_bundle_templates: './init_3d_Bundle'
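The five YAML edits are all the same one-character fix: each per-section `device` value was written as `'cuda` with no closing quote. An unterminated single-quoted scalar is a YAML syntax error, so the whole config failed to load, not just the affected keys. A minimal sketch of the failure and the fix, assuming a PyYAML-style loader (the repo's actual config loader is not shown in this commit):

```python
# Demonstrates why `device: 'cuda` broke the config and `'cuda'` fixes it.
# Assumes PyYAML; the repo's actual loader is an assumption here.
import yaml

broken = "flux:\n  seed: 42\n  device: 'cuda\n"   # unterminated quote
fixed = "flux:\n  seed: 42\n  device: 'cuda'\n"

try:
    yaml.safe_load(broken)
except yaml.YAMLError as err:
    # PyYAML rejects the unterminated scalar while scanning the stream
    print("broken config rejected:", type(err).__name__)

cfg = yaml.safe_load(fixed)
print(cfg["flux"]["device"])  # -> cuda
```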