Runtime error
Update app.py
app.py
CHANGED
@@ -1,41 +1,36 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+from transformers import pipeline
 import torch
 import os
 
+# Configure cache
 os.environ['HF_HOME'] = '/tmp/cache'
 
-#
-
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(
-    model_id,
-    device_map="auto",
-    torch_dtype=torch.float16
-)
+# Use a reliable model that works in Spaces
+MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.2"  # 8K context, good performance
 
-#
-
+# Initialize the pipeline with your settings
+generator = pipeline(
     "text-generation",
-    model=
-
-
+    model=MODEL_NAME,
+    device_map="auto",
+    torch_dtype=torch.bfloat16,
+    max_new_tokens=560
 )
 
-def generate_chat_completion(messages, max_tokens=
-    """Generate chat response
-    # Format messages as prompt
-    prompt = tokenizer.apply_chat_template(
-        messages,
-        tokenize=False,
-        add_generation_prompt=True
-    )
+def generate_chat_completion(messages, max_tokens=560, temperature=0.8):
+    """Generate chat response with precise control"""
+    # Format messages as instruction prompt
+    prompt = "\n".join([f"{msg['role'].capitalize()}: {msg['content']}" for msg in messages])
+    prompt += "\nAssistant:"
 
-    # Generate response
-
+    # Generate response with your settings
+    response = generator(
         prompt,
         max_new_tokens=max_tokens,
        temperature=temperature,
+        top_p=0.95,
+        repetition_penalty=1.15,
         do_sample=True
     )
 
-    return
+    return response[0]['generated_text'].replace(prompt, "").strip()
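For reference, a minimal sketch of how the updated helper might be called, assuming app.py has been imported so generate_chat_completion is in scope. The example messages and the max_tokens/temperature values are illustrative, not part of the commit:

# Hypothetical usage (not from this commit). The role/content dicts
# match the shape the prompt formatter iterates over.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain what this Space does in one sentence."},
]

# The formatter capitalizes each role, so the model receives a prompt
# of the form "System: ...\nUser: ...\nAssistant:".
reply = generate_chat_completion(messages, max_tokens=256, temperature=0.7)
print(reply)

One design note: stripping the echoed prompt with .replace(prompt, "") works because the text-generation pipeline returns the prompt plus the completion by default; passing return_full_text=False in the pipeline call is the built-in way to get only the newly generated text.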