davidizzle committed on
Commit 11e6079 · 1 Parent(s): 873e964
Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -84,17 +84,19 @@ if st.button("Generate"):
 
 
         # Generate text
+        chat = [{"role": "user", "content": prompt}]
+        formatted_prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
 
-        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+        inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
         with torch.no_grad():
            outputs = model.generate( **inputs,
                # max_new_tokens=100,
-               max_new_tokens=512,
+               max_new_tokens=256,
                do_sample=True,
                temperature=0.6,
                top_p=0.95,
                top_k=50,
-               num_return_sequences=1,
+               # num_return_sequences=1,
                eos_token_id=tokenizer.eos_token_id
            )
 