Update app.py
app.py CHANGED
@@ -1,41 +1,14 @@
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer

-
-

-

-
-model = AutoModelForCausalLM.from_pretrained(
-    model_name,
-    torch_dtype="auto",
-    device_map="auto",
-)
-model.eval()

-messages = [
-    {"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
-    {"role": "user", "content": text},
-]
-prompt = tokenizer.apply_chat_template(
-    messages,
-    tokenize=False,
-    add_generation_prompt=True
-)
-token_ids = tokenizer.encode(
-    prompt, add_special_tokens=False, return_tensors="pt"
-)

-
-output_ids = model.generate(
-    token_ids.to(model.device),
-    max_new_tokens=1200,
-    do_sample=True,
-    temperature=0.6,
-    top_p=0.9,
-)
-output = tokenizer.decode(
-    output_ids.tolist()[0][token_ids.size(1):], skip_special_tokens=True
-)
-print(output)
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import torch

+tokenizer = AutoTokenizer.from_pretrained("lightblue/ao-karasu-72B")
+model = AutoModelForCausalLM.from_pretrained("lightblue/ao-karasu-72B", device_map="auto")

+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

+messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
+messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})

+prompt = tokenizer.apply_chat_template(conversation=messages, add_generation_prompt=True, tokenize=False)

+pipe(prompt, max_new_tokens=100, do_sample=False, temperature=0.0, return_full_text=False)
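
For reference, the updated app.py boils down to the following pipeline-based inference snippet. The code below is the same as the added lines above; the comments, the English glosses of the Japanese strings, and the final print of the pipeline output are illustrative additions and are not part of the commit itself.

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

# Load the Ao Karasu 72B tokenizer and model; device_map="auto" spreads the
# 72B weights across whatever GPUs (and CPU memory) are available.
tokenizer = AutoTokenizer.from_pretrained("lightblue/ao-karasu-72B")
model = AutoModelForCausalLM.from_pretrained("lightblue/ao-karasu-72B", device_map="auto")

# Wrap the model and tokenizer in a text-generation pipeline.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Chat history: system prompt ("You are an AI assistant.") followed by the
# user question ("Who is the Prime Minister of the United Kingdom?").
messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})

# Render the chat template to a plain prompt string (tokenize=False) and
# append the assistant turn marker so the model starts its reply.
prompt = tokenizer.apply_chat_template(conversation=messages, add_generation_prompt=True, tokenize=False)

# Greedy decoding; return_full_text=False drops the prompt from the output.
result = pipe(prompt, max_new_tokens=100, do_sample=False, temperature=0.0, return_full_text=False)
print(result[0]["generated_text"])  # printing added here for illustration

Note that with do_sample=False the temperature setting has no effect on decoding, and recent transformers versions typically emit a warning that it is being ignored; generation is plain greedy decoding either way.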