Yokky009 committed (verified)
Commit 8baf7bb · 1 Parent(s): dbc9005

Update app.py

Files changed (1): app.py +8 -35
app.py CHANGED
@@ -1,41 +1,14 @@
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer

-DEFAULT_SYSTEM_PROMPT = "あなたは誠実で優秀な日本人のアシスタントです。特に指示が無い場合は、常に日本語で回答してください。"
-text = "仕事の熱意を取り戻すためのアイデアを5つ挙げてください。"

-model_name = "elyza/Llama-3-ELYZA-JP-8B"

-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(
-    model_name,
-    torch_dtype="auto",
-    device_map="auto",
-)
-model.eval()

-messages = [
-    {"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
-    {"role": "user", "content": text},
-]
-prompt = tokenizer.apply_chat_template(
-    messages,
-    tokenize=False,
-    add_generation_prompt=True
-)
-token_ids = tokenizer.encode(
-    prompt, add_special_tokens=False, return_tensors="pt"
-)

-with torch.no_grad():
-    output_ids = model.generate(
-        token_ids.to(model.device),
-        max_new_tokens=1200,
-        do_sample=True,
-        temperature=0.6,
-        top_p=0.9,
-    )
-output = tokenizer.decode(
-    output_ids.tolist()[0][token_ids.size(1):], skip_special_tokens=True
-)
-print(output)
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import torch

+tokenizer = AutoTokenizer.from_pretrained("lightblue/ao-karasu-72B")
+model = AutoModelForCausalLM.from_pretrained("lightblue/ao-karasu-72B", device_map="auto")

+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

+messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
+messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})

+prompt = tokenizer.apply_chat_template(conversation=messages, add_generation_prompt=True, tokenize=False)

+pipe(prompt, max_new_tokens=100, do_sample=False, temperature=0.0, return_full_text=False)
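
Two details of the new script are worth flagging: the final pipe(...) call's return value is never printed, so when app.py runs as a script rather than a notebook cell the completion is discarded, and passing temperature=0.0 alongside do_sample=False makes recent transformers releases warn that the temperature flag only applies to sampling modes. Below is a minimal runnable sketch of the updated script (not part of the commit) that prints the result and drops the redundant temperature argument; English translations of the Japanese prompts are added as comments.

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_name = "lightblue/ao-karasu-72B"  # 72B-parameter model; device_map="auto" shards it across available GPUs

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

messages = [
    {"role": "system", "content": "あなたはAIアシスタントです。"},  # "You are an AI assistant."
    {"role": "user", "content": "イギリスの首相は誰ですか?"},      # "Who is the Prime Minister of the UK?"
]

# Render the chat as one prompt string; add_generation_prompt appends the assistant turn header.
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

# do_sample=False already selects greedy decoding, so temperature is omitted.
result = pipe(prompt, max_new_tokens=100, do_sample=False, return_full_text=False)
print(result[0]["generated_text"])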