Kvikontent committed
Commit 399203a · 1 Parent(s): acaca81

Update app.py

Files changed (1)
app.py  +1  -11
app.py CHANGED
@@ -1,13 +1,3 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import torch
-import gradio as gr
-
-# Load the tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained("gpt-chatbot")
-model = AutoModelForCausalLM.from_pretrained("gpt-chatbot")
-
-
-def generate_response(input_text):
     input_ids = tokenizer.encode(input_text + " >> User: ", return_tensors='pt')
     generated_output = model.generate(input_ids, max_length=100, num_return_sequences=1)
     response = tokenizer.decode(generated_output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
@@ -24,4 +14,4 @@ iface = gr.Interface(
     examples=[['Hello'], ['How are you?'], ['What is your name?']],
 )
 
-iface.launch(share=True)
+iface.launch(share=True)
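
For context, the sketch below reconstructs roughly what app.py looked like before this commit, based on the removed lines and the surrounding diff context. The gr.Interface arguments other than examples are not visible here, so fn, inputs, outputs, and the return statement in generate_response are assumptions; the model id "gpt-chatbot" is copied verbatim from the removed lines. The commit deletes the imports, the tokenizer/model loading, and the def generate_response(...) line while keeping the indented function body, so the remaining file references names it no longer defines.

from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr

# Load the tokenizer and model (model id taken verbatim from the removed lines)
tokenizer = AutoTokenizer.from_pretrained("gpt-chatbot")
model = AutoModelForCausalLM.from_pretrained("gpt-chatbot")


def generate_response(input_text):
    # Encode the prompt; the " >> User: " suffix follows the removed code's prompt format
    input_ids = tokenizer.encode(input_text + " >> User: ", return_tensors='pt')
    generated_output = model.generate(input_ids, max_length=100, num_return_sequences=1)
    # Decode only the newly generated tokens, dropping the prompt portion
    response = tokenizer.decode(generated_output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    return response  # assumed: the return statement falls outside the diff context


# The interface wiring between the two hunks is not shown; fn/inputs/outputs are assumptions
iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    examples=[['Hello'], ['How are you?'], ['What is your name?']],
)

iface.launch(share=True)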