Commit 60af4cb · 1 Parent: c19fa74
Spinner
app.py
CHANGED
@@ -3,12 +3,14 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
 # deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct
-model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
 # model_id = "deepseek-ai/deepseek-coder-6.7b-instruct"
 # model_id = "deepseek-ai/deepseek-coder-33b-instruct"
 # model_id = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
 # model_id = "deepseek-ai/DeepSeek-Coder-V2-Instruct"
-tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+# This works best
+model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id,
     # device_map=None,
     # torch_dtype=torch.float32,
@@ -18,7 +20,13 @@ model = AutoModelForCausalLM.from_pretrained(model_id,
 )
 # model.to("cpu")
 
+spinner = gr.HTML(
+    "<div style='text-align:center'><img src='https://media2.giphy.com/media/v1.Y2lkPTc5MGI3NjExMXViMm02MnR6bGJ4c2h3ajYzdWNtNXNtYnNic3lnN2xyZzlzbm9seSZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9cw/k32ddF9WVs44OUaZAm/giphy.gif' width='180'></div>",
+    visible=False  # hidden by default
+)
+
 def generate_code(prompt, style="Clean & Pythonic"):
+    spinner.update(visible=True)
     if style == "Verbose like a 15th-century manuscript":
         prompt = "In a manner most detailed, write code that... " + prompt
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
@@ -32,6 +40,7 @@ def generate_code(prompt, style="Clean & Pythonic"):
         num_return_sequences=1,
         eos_token_id=tokenizer.eos_token_id
     )
+    spinner.update(visible=False)
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 demo = gr.Interface(
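
A caveat on this change: in Gradio, calling spinner.update(visible=True) inside the handler builds an update object and then discards it, so the component's visibility on the page never changes; visibility updates have to travel back to the UI as outputs of the event. Below is a minimal sketch of that pattern, assuming a gr.Blocks layout and reusing the generate_code function from app.py above; the component and button names here are illustrative, not from the commit.

import gradio as gr

with gr.Blocks() as demo:
    prompt_box = gr.Textbox(label="Prompt")  # hypothetical names for illustration
    spinner = gr.HTML(
        "<div style='text-align:center'>loading...</div>",
        visible=False,  # hidden until a generation starts
    )
    output_box = gr.Code(label="Generated code")
    run_btn = gr.Button("Generate")

    def run_with_spinner(prompt):
        # First yield: show the spinner, leave the output untouched
        yield gr.update(visible=True), gr.update()
        code = generate_code(prompt)  # the handler defined in app.py above
        # Final yield: hide the spinner and deliver the result
        yield gr.update(visible=False), code

    run_btn.click(run_with_spinner, inputs=prompt_box, outputs=[spinner, output_box])

Note also that Gradio already overlays its own loading animation on output components while an event is running, so a custom spinner may be unnecessary here in the first place.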
|