kingabzpro committed on
Commit
e3bfc24
·
1 Parent(s): bc7e1a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  import os
3
-
 
4
 
5
  HF_TOKEN = os.getenv('HF_TOKEN')
6
  hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "Rick-bot-flags")
@@ -30,7 +31,7 @@ def predict(input, history=[]):
30
  bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
31
 
32
  # generate a response
33
- history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
34
 
35
  # convert the tokens to text, and then split the responses into lines
36
  response = tokenizer.decode(history[0]).replace("<|endoftext|>", "\n")
@@ -38,9 +39,8 @@ def predict(input, history=[]):
38
  return response, history
39
 
40
  gr.Interface(fn= predict,
41
- css=".footer {display:none !important}",
42
- inputs=["text", "state"],
43
- outputs=["text", "state"],
44
  theme ="grass",
45
  title = title,
46
  flagging_callback=hf_writer,
 
1
  import gradio as gr
2
  import os
3
+ import subprocess
4
+ subprocess.run(["pip", "install","gradio==2.8.0b10"])
5
 
6
  HF_TOKEN = os.getenv('HF_TOKEN')
7
  hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "Rick-bot-flags")
 
31
  bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
32
 
33
  # generate a response
34
+ history = model.generate(bot_input_ids, max_length=80, pad_token_id=tokenizer.eos_token_id).tolist()
35
 
36
  # convert the tokens to text, and then split the responses into lines
37
  response = tokenizer.decode(history[0]).replace("<|endoftext|>", "\n")
 
39
  return response, history
40
 
41
  gr.Interface(fn= predict,
42
+ inputs="textbox",
43
+ outputs="chat",
 
44
  theme ="grass",
45
  title = title,
46
  flagging_callback=hf_writer,