VanYsa committed on
Commit f43260c · 1 Parent(s): a2da9cb

Update app.py

Files changed (1)
  1. app.py +3 -4
app.py CHANGED
@@ -125,7 +125,7 @@ def bot(history,message):
     """
     Prints the LLM's response in the chatbot
     """
-    response = bot_response(message)
+    response = "That's great!!!"#bot_response(message)
     history[-1][1] = ""
     for character in response:
         history[-1][1] += character
@@ -135,7 +135,7 @@ def bot(history,message):
 def bot_response(message):
     """
     Generates a response from the LLM model.
-    max_new_tokens, temperature and top_p are set to 512, 0.6 and 0.9 respectively.
+    max_new_tokens, temperature and top_p are set to 256, 0.6 and 0.9 respectively.
     """
     messages = [
         {"role": "system", "content": "You are a helpful AI assistant."},
@@ -155,13 +155,12 @@ def bot_response(message):
 
     outputs = pipeline(
         prompt,
-        max_new_tokens=512,
+        max_new_tokens=256,
         eos_token_id=terminators,
         do_sample=True,
         temperature=0.6,
         top_p=0.9,
     )
-    print(outputs[0]["generated_text"][len(prompt):])
     return outputs[0]["generated_text"][len(prompt):]
 
 with gr.Blocks(
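
Note: pipeline, terminators, and prompt are defined earlier in app.py and are not shown in these hunks. A minimal sketch of that setup, assuming the standard transformers text-generation pipeline and a Llama-3-style chat template (the model id and the <|eot_id|> terminator are assumptions, not part of this commit):

import torch
import transformers

# Assumed checkpoint; the commit does not show which model app.py loads.
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

# Stop generation at EOS or the Llama-3 end-of-turn token.
terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

# Inside bot_response(message): render the chat messages into the prompt
# string that the diff passes to pipeline(...).
prompt = pipeline.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)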