Commit: 8c237b7
Parent(s): 56db51b

enough with reasoning

app.py CHANGED
@@ -24,8 +24,8 @@ gif_html.markdown(
 @st.cache_resource
 def load_model():
     # model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
-
-    model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
+    model_id = "deepseek-ai/deepseek-llm-7b-chat"
+    # model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
 
     # model_id = "deepseek-ai/DeepSeek-R1-Distill-Llama-8B"
     tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -84,10 +84,8 @@ if st.button("Generate"):
 
 
     # Generate text
-    chat = [{"role": "user", "content": prompt}]
-    formatted_prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
 
-    inputs = tokenizer(
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     with torch.no_grad():
         outputs = model.generate( **inputs,
             # max_new_tokens=100,
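For context, a minimal sketch of how the load/generate path in app.py plausibly reads after this commit, assuming the standard transformers and Streamlit APIs. Only the model id, the direct tokenizer call, and the torch.no_grad() generate block come from the diff; the input widget, the from_pretrained arguments, the max_new_tokens value, and the decode step are illustrative assumptions.

# Minimal sketch (not the full app.py): model loading and generation as changed by this commit.
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource
def load_model():
    # The commit switches from the R1 distill to the plain chat model.
    model_id = "deepseek-ai/deepseek-llm-7b-chat"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # device/dtype arguments are omitted here; the real app may pass them.
    model = AutoModelForCausalLM.from_pretrained(model_id)
    return tokenizer, model

tokenizer, model = load_model()
prompt = st.text_area("Prompt")  # illustrative input widget, not taken from the diff

if st.button("Generate"):
    # Generate text: the chat-template formatting was removed, so the raw
    # prompt string is tokenized directly.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=100)  # value is illustrative
    st.write(tokenizer.decode(outputs[0], skip_special_tokens=True))

The sketch mirrors the design choice visible in the diff: the prompt is tokenized verbatim instead of being wrapped with tokenizer.apply_chat_template as in the previous revision.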