flatindo committed on
Commit
e0b4e38
·
1 Parent(s): 06e1e0a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -20
app.py CHANGED
@@ -1,25 +1,25 @@
1
- import gradio
2
- from transformers import pipeline
3
 
4
# Function to generate chatbot responses.
#
# NOTE(review): the original built a fresh text-generation pipeline on every
# call, reloading the 2.7B-parameter GPT-Neo checkpoint each time.  The
# pipeline is now created lazily once and cached for all later calls.
_chatbot_pipeline = None


def generate_chatbot_response(prompt):
    """Return a text completion for *prompt* using GPT-Neo 2.7B.

    Parameters
    ----------
    prompt : str
        The user's input text.

    Returns
    -------
    str
        The generated text (the pipeline's ``generated_text`` field, which
        echoes the prompt followed by the continuation, up to 50 tokens).
    """
    global _chatbot_pipeline
    if _chatbot_pipeline is None:
        # Heavy one-time model load; reused by every subsequent request.
        _chatbot_pipeline = pipeline("text-generation", model="EleutherAI/gpt-neo-2.7B")
    chatbot_response = _chatbot_pipeline(prompt, max_length=50, num_return_sequences=1)[0]["generated_text"]
    return chatbot_response
9
 
10
# Bridge between the Gradio widget and the response generator.
def chatbot_app(prompt):
    """Generate a reply for *prompt* and wrap it for display."""
    return {"Chatbot Response": generate_chatbot_response(prompt)}
14
 
15
# All Interface settings in one place so the UI wiring reads declaratively.
_interface_config = {
    "fn": chatbot_app,
    "inputs": "text",
    "outputs": "text",
    "title": "Chatbot App",
    "description": "Enter a prompt to generate a chatbot response.",
}

# Build the Gradio front end and start serving it.
iface = gradio.Interface(**_interface_config)
iface.launch()
 
1
+ import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
# Checkpoint to serve.
# FIX: the previous value, "gpt-3.5-turbo", is an OpenAI API-only model with
# no checkpoint on the Hugging Face Hub, so AutoModelForCausalLM.from_pretrained()
# raised at startup.  "gpt2" is a real Hub checkpoint with the same
# causal-LM interface expected by the rest of this script.
model_name = "gpt2"

# Load the model and tokenizer once at import time so every request reuses
# the same in-memory instances.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
 
9
 
10
# Generate a response
def generate_response(prompt):
    """Generate a text continuation of *prompt* with the module-level model.

    Parameters
    ----------
    prompt : str
        User input to continue.

    Returns
    -------
    str
        The decoded generation (prompt echo plus continuation, capped at
        ``max_length`` total tokens) with special tokens stripped.
    """
    import torch  # local import: only needed at inference time

    # Tokenize via tokenizer(...) instead of bare tokenizer.encode() so we
    # also get an explicit attention mask to hand to generate().
    encoded = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():  # pure inference — skip autograd bookkeeping
        output_ids = model.generate(
            encoded["input_ids"],
            attention_mask=encoded["attention_mask"],
            max_length=100,
            num_return_sequences=1,
            # GPT-style checkpoints define no pad token; reusing EOS keeps
            # generate() from warning and mis-padding.
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
 
 
 
15
 
16
# Gradio interface
def chatbot_interface(prompt):
    """Thin adapter: pass the textbox value straight to the generator."""
    return generate_response(prompt)
20
+
21
# Create a Gradio interface: one textbox in, one textbox out, backed by
# the chatbot adapter above.
iface = gr.Interface(
    fn=chatbot_interface,
    inputs="text",
    outputs="text",
)

# Launch the interface (blocks and serves the web app).
iface.launch()