Spaces:
Sleeping
Sleeping
FERRETTIV
committed on
Commit
·
7f91510
1
Parent(s):
d0b0be2
change model
Browse files
app.py
CHANGED
|
@@ -3,13 +3,13 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
| 3 |
import torch
|
| 4 |
|
| 5 |
# deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct
|
| 6 |
-
|
| 7 |
# model_id = "deepseek-ai/deepseek-coder-33b-instruct"
|
| 8 |
# model_id = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
|
| 9 |
# model_id = "deepseek-ai/DeepSeek-Coder-V2-Instruct"
|
| 10 |
|
| 11 |
# This works best
|
| 12 |
-
model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
|
| 13 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 14 |
model = AutoModelForCausalLM.from_pretrained(model_id,
|
| 15 |
# device_map=None,
|
|
@@ -38,17 +38,6 @@ def generate_code(prompt, style="Clean & Pythonic"):
|
|
| 38 |
# spinner.update(visible=False)
|
| 39 |
return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 40 |
|
| 41 |
-
# demo = gr.Interface(
|
| 42 |
-
# fn=generate_code,
|
| 43 |
-
# inputs=[
|
| 44 |
-
# gr.Textbox(label="How shall Codice Da Vinci help today?", lines=3),
|
| 45 |
-
# gr.Dropdown(["Clean & Pythonic", "Verbose like a 15th-century manuscript"], label="Code Style")
|
| 46 |
-
# ],
|
| 47 |
-
# outputs=gr.Code(label="π§Ύ Leonardo's Work"),
|
| 48 |
-
# title="Codice Da Vinci ππ»",
|
| 49 |
-
# description="Your Renaissance coding assistant. Fluent in algorithms and Latin. Powered by LLM."
|
| 50 |
-
# )
|
| 51 |
-
|
| 52 |
with gr.Blocks() as demo:
|
| 53 |
gr.Markdown("<h1 style='text-align:center;'>Codice Da Vinci ππ»</h1>")
|
| 54 |
|
|
|
|
| 3 |
import torch
|
| 4 |
|
| 5 |
# deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct
|
| 6 |
+
model_id = "deepseek-ai/deepseek-coder-6.7b-instruct"
|
| 7 |
# model_id = "deepseek-ai/deepseek-coder-33b-instruct"
|
| 8 |
# model_id = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
|
| 9 |
# model_id = "deepseek-ai/DeepSeek-Coder-V2-Instruct"
|
| 10 |
|
| 11 |
# This works best
|
| 12 |
+
# model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
|
| 13 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 14 |
model = AutoModelForCausalLM.from_pretrained(model_id,
|
| 15 |
# device_map=None,
|
|
|
|
| 38 |
# spinner.update(visible=False)
|
| 39 |
return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
with gr.Blocks() as demo:
|
| 42 |
gr.Markdown("<h1 style='text-align:center;'>Codice Da Vinci ππ»</h1>")
|
| 43 |
|