Kvikontent committed
Commit 027fad9 · 1 Parent(s): 975ef88

Update app.py

Files changed (1):
  app.py (+17 -16)
app.py CHANGED
@@ -1,22 +1,23 @@
+!pip install streamlit transformers
 import streamlit as st
-from chatterbot import ChatBot
-from chatterbot.trainers import ChatterBotCorpusTrainer
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
-# Create a chatbot
-chatbot = ChatBot("KviGPT")
-trainer = ChatterBotCorpusTrainer(chatbot)
+device = "cuda"
+model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
+tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
 
-# Use the English corpus to train the chatbot
-trainer.train("chatterbot.corpus.english")
+model.to(device)
 
-# Define the function to get the chatbot response
-def get_response(input_text):
-    response = chatbot.get_response(input_text)
-    return str(response)
+def generate_text(prompt, max_new_tokens=100, do_sample=True):
+    model_inputs = tokenizer([prompt], return_tensors="pt").to(device)
+    generated_ids = model.generate(**model_inputs, max_new_tokens=max_new_tokens, do_sample=do_sample)
+    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
+
+st.title("KviGPT - Hugging Face Chat")
+
+user_input = st.text_input("You:", value="My favourite condiment is ")
 
-# Create the Streamlit app
-st.title("KviGPT Chatbot")
-user_input = st.text_area("You:", height=200)
 if st.button("Send"):
-    bot_response = get_response(user_input)
-    st.text_area("KviGPT:", value=bot_response, height=200)
+    prompt = user_input
+    model_response = generate_text(prompt)[0]
+    st.write("KviGPT:", model_response)