# KviGPT — minimal Streamlit chat demo backed by mistralai/Mistral-7B-v0.1.
# (Replaced Hugging Face Spaces page residue — "Spaces: / Sleeping / Sleeping" —
# that was captured along with the code and is not part of the program.)
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Fall back to CPU when no GPU is present; the original hard-coded "cuda",
# which raises at model.to() on CPU-only hosts.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Loaded once at module import. NOTE(review): Streamlit re-runs the script on
# every interaction — consider wrapping this load in @st.cache_resource so the
# 7B model is not re-loaded per rerun; left as-is to keep structure unchanged.
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
model.to(device)
def generate_text(prompt, max_new_tokens=100, do_sample=True):
    """Generate a continuation of ``prompt`` using the module-level model.

    Args:
        prompt: Text to continue.
        max_new_tokens: Upper bound on the number of tokens to generate.
        do_sample: If True, sample from the distribution; if False, decode
            greedily.

    Returns:
        A list of decoded strings, one per input sequence (always length 1
        here, since a single prompt is wrapped in a list).
    """
    # Tokenize as a batch of one and move tensors to the model's device.
    model_inputs = tokenizer([prompt], return_tensors="pt").to(device)
    generated_ids = model.generate(
        **model_inputs, max_new_tokens=max_new_tokens, do_sample=do_sample
    )
    # skip_special_tokens drops BOS/EOS markers from the decoded text.
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
# --- Streamlit UI -----------------------------------------------------------
st.title("KviGPT - Hugging Face Chat")
user_input = st.text_input("You:", value="My favourite condiment is ")

if st.button("Send"):
    prompt = user_input
    # generate_text returns a one-element list; take the single completion.
    model_response = generate_text(prompt)[0]
    st.write("KviGPT:", model_response)