Tonic committed
Commit 42eab30 · 1 Parent(s): 2a6a31f

Update app.py

Files changed (1): app.py +27 -21
app.py CHANGED
@@ -44,28 +44,34 @@ model = transformers.AutoModelForCausalLM.from_pretrained(model_name,
 model.eval()
 
 class StarlingBot:
-    def __init__(self, system_prompt="The following dialogue is a conversation"):
-        self.system_prompt = system_prompt
+    def __init__(self, system_prompt="The following dialogue is a conversation"):
+        self.system_prompt = system_prompt
 
-    def predict(self, user_message, assistant_message, system_prompt, do_sample, temperature=0.4, max_new_tokens=700, top_p=0.99, repetition_penalty=1.9):
-        conversation = f" <s> [INST] {self.system_prompt} [INST] {assistant_message if assistant_message else ''} </s> [/INST] {user_message} </s> "
-        input_ids = tokenizer.encode(conversation, return_tensors="pt", add_special_tokens=False)
-        input_ids = input_ids.to(device)
-        response = model.generate(
-            input_ids=input_ids,
-            use_cache=False,
-            early_stopping=False,
-            bos_token_id=model.config.bos_token_id,
-            eos_token_id=model.config.eos_token_id,
-            pad_token_id=model.config.eos_token_id,
-            temperature=temperature,
-            do_sample=True,
-            max_new_tokens=max_new_tokens,
-            top_p=top_p,
-            repetition_penalty=repetition_penalty
-        )
-        response_text = tokenizer.decode(response[0], skip_special_tokens=True)
-        return response_text
+    def predict(self, user_message, assistant_message, system_prompt, do_sample, temperature=0.4, max_new_tokens=700, top_p=0.99, repetition_penalty=1.9):
+        conversation = f" <s> [INST] {self.system_prompt} [INST] {assistant_message if assistant_message else ''} </s> [/INST] {user_message} </s> "
+        input_ids = tokenizer.encode(conversation, return_tensors="pt", add_special_tokens=False)
+        input_ids = input_ids.to(device)
+        response = model.generate(
+            input_ids=input_ids,
+            use_cache=False,
+            early_stopping=False,
+            bos_token_id=model.config.bos_token_id,
+            eos_token_id=model.config.eos_token_id,
+            pad_token_id=model.config.eos_token_id,
+            temperature=temperature,
+            do_sample=True,
+            max_new_tokens=max_new_tokens,
+            top_p=top_p,
+            repetition_penalty=repetition_penalty
+        )
+        response_text = tokenizer.decode(response[0], skip_special_tokens=True)
+        response_text = response.strip()
+        # response_text = response.split("<|assistant|>\n")[-1]
+        return response_text
+        finally:
+            del input_ids, attention_mask, output_ids
+            gc.collect()
+            torch.cuda.empty_cache()
 
 starling_bot = StarlingBot()
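
Note: as committed, predict() cannot run. The finally: block has no matching try:, response.strip() is called on the output tensor (discarding the decoded text), and del references attention_mask and output_ids, which are never defined. Below is a minimal sketch of what the cleanup was presumably meant to look like. It assumes the module-level tokenizer, model, and device defined earlier in app.py, and that gc and torch are imported at the top of the file; the prompt markup is kept verbatim from the commit rather than taken as the model's real chat format.

import gc

import torch

class StarlingBot:
    def __init__(self, system_prompt="The following dialogue is a conversation"):
        self.system_prompt = system_prompt

    def predict(self, user_message, assistant_message=None, system_prompt=None,
                do_sample=True, temperature=0.4, max_new_tokens=700,
                top_p=0.99, repetition_penalty=1.9):
        # Honor the per-call system prompt; the commit accepts the argument
        # but always reads self.system_prompt.
        prompt = system_prompt or self.system_prompt
        # Markup kept verbatim from the commit; the nested [INST] and stray
        # </s> look wrong for Mistral-style templates, so treat this as a
        # placeholder rather than the model's actual chat format.
        conversation = f" <s> [INST] {prompt} [INST] {assistant_message or ''} </s> [/INST] {user_message} </s> "
        input_ids = tokenizer.encode(conversation, return_tensors="pt",
                                     add_special_tokens=False).to(device)
        try:
            output_ids = model.generate(
                input_ids=input_ids,
                eos_token_id=model.config.eos_token_id,
                pad_token_id=model.config.eos_token_id,
                do_sample=do_sample,  # use the argument instead of hardcoding True
                temperature=temperature,
                max_new_tokens=max_new_tokens,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
            )
            # Decode first, then strip the *string*; the commit strips the tensor.
            return tokenizer.decode(output_ids[0], skip_special_tokens=True).strip()
        finally:
            # Free GPU memory between requests; only delete names that are
            # guaranteed to exist here (output_ids may not, if generate raised).
            del input_ids
            gc.collect()
            torch.cuda.empty_cache()

If the Space targets Starling-LM or another chat-tuned model, tokenizer.apply_chat_template (available in recent transformers releases) would build the prompt from the model's own template instead of the hand-rolled tags above.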