JiantaoLin committed · Commit 8ed8caa · Parent(s): 3ddd275

new

Files changed:
- app.py (+1 -1)
- models/llm/llm.py (+2 -1)
- pipeline/kiss3d_wrapper.py (+1 -1)
app.py CHANGED
@@ -482,7 +482,7 @@ def main():
 
     # demo.queue(default_concurrency_limit=1)
    # demo.launch(server_name="0.0.0.0", server_port=9239)
-    subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
+    # subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
     demo.launch()
 
 
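The app.py hunk comments out the destructive cleanup of the ZeroGPU offload cache, so launching the demo no longer wipes /data-nvme/zerogpu-offload/. If the cleanup is still wanted occasionally, one possible sketch (not part of this commit) is to gate it behind an opt-in environment variable; CLEAR_ZEROGPU_CACHE is a hypothetical name used only for illustration:

import os
import subprocess

# Hypothetical opt-in flag; the commit simply comments the cleanup out instead.
if os.environ.get("CLEAR_ZEROGPU_CACHE") == "1":
    # Same command the commit disables: wipe the ZeroGPU offload cache.
    subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)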
models/llm/llm.py CHANGED
@@ -1,6 +1,7 @@
 
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
+import spaces
 # device = "cuda" # the device to load the model onto
 model_name_or_dir="Qwen/Qwen2-7B-Instruct"
 
@@ -55,7 +56,7 @@ def load_llm_model(model_name_or_dir, torch_dtype='auto', device_map='cpu'):
 # print(f"Before load llm model: {torch.cuda.memory_allocated() / 1024**3} GB")
 # load_model()
 # print(f"After load llm model: {torch.cuda.memory_allocated() / 1024**3} GB")
-
+@spaces.GPU
 def get_llm_response(model, tokenizer, user_prompt, seed=None, system_prompt=DEFAULT_SYSTEM_PROMPT):
     # global model
     # global tokenizer
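The models/llm/llm.py hunks import the Hugging Face `spaces` package and decorate `get_llm_response` with `@spaces.GPU`, which on a ZeroGPU Space requests a GPU only for the duration of the decorated call. Below is a minimal sketch of the same pattern, assuming the code runs on a ZeroGPU Space; `generate_text` and its parameters are illustrative names, not part of this repository:

import spaces
import torch

@spaces.GPU  # a time budget can also be requested, e.g. @spaces.GPU(duration=120)
def generate_text(model, tokenizer, prompt, max_new_tokens=512):
    # Runs with a GPU attached; inputs are moved to the model's device first.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)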
pipeline/kiss3d_wrapper.py CHANGED
@@ -256,7 +256,7 @@ class kiss3d_wrapper(object):
         caption_text = self.get_detailed_prompt(caption_text)
 
         return caption_text
-
+    @spaces.GPU
     def get_detailed_prompt(self, prompt, seed=None):
         if self.llm_model is not None:
             detailed_prompt = get_llm_response(self.llm_model, self.llm_tokenizer, prompt, seed=seed)
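Note that this hunk adds the `@spaces.GPU` decorator to `get_detailed_prompt` but does not show an accompanying `import spaces` in pipeline/kiss3d_wrapper.py, so it presumably relies on the module already importing `spaces` elsewhere; otherwise the decorator would raise a NameError when the class is defined. A purely illustrative fallback (not part of this commit) that keeps the bare `@spaces.GPU`-style decoration importable outside a ZeroGPU Space:

try:
    import spaces
    gpu_decorator = spaces.GPU
except ImportError:
    def gpu_decorator(fn):
        # No-op stand-in when the `spaces` package is unavailable.
        return fn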