GPT-Kalki / app.py
tsaditya's picture
Update app.py
5b4ec6f
raw
history blame
993 Bytes
import streamlit as st
from transformers import AutoModelForCausalLM, AutoModelWithLMHead, AutoTokenizer, pipeline
def getit(prompt: str) -> str:
    """Generate a text continuation of *prompt* with the GPT-Kalki model.

    Uses the module-level ``tokenizer`` and ``model``. Returns the sampled
    continuation with the echoed prompt prefix stripped.

    Args:
        prompt: User-supplied text to continue.

    Returns:
        The generated text that follows the prompt.
    """
    # Prepend the model's start-of-text marker; keep the tensor on CPU.
    input_ids = tokenizer(f'<|startoftext|> {prompt}', return_tensors="pt").input_ids.cpu()
    # BUG FIX: original had a duplicated assignment
    # (`sample_outputs = sample_outputs = model.generate(...)`).
    # Sampled decoding: top-k/top-p with mild temperature, one sequence.
    sample_outputs = model.generate(
        input_ids,
        do_sample=True,
        max_length=512,
        top_k=50,
        top_p=0.95,
        num_return_sequences=1,
        no_repeat_ngram_size=3,
        temperature=0.7,
    )
    predicted_text = tokenizer.decode(sample_outputs[0], skip_special_tokens=True)
    # Strip the echoed prompt so only the continuation is returned.
    # NOTE(review): slicing by len(prompt) assumes decode() reproduces the
    # prompt verbatim at position 0 — fragile if tokenization normalizes
    # whitespace/characters; verify against real model output.
    return predicted_text[len(prompt):]
# --- Model setup ---------------------------------------------------------
model_name = 'tsaditya/GPT-Kalki'
# AutoModelWithLMHead is deprecated (removed in recent transformers);
# AutoModelForCausalLM is the current class for GPT-style causal LMs.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# --- UI ------------------------------------------------------------------
# BUG FIX: st.text_input requires a label as its first argument; the
# original call passed only `value=` and would raise a TypeError.
inp = st.text_input("Prompt", value="மணிமேகலை! உன் மனோரதம் நிறைவேறிவிட்டது.")
out = getit(inp)
st.write(out)

# Use a context manager so the video file handle is closed after reading.
with open('myvideo.mp4', 'rb') as video_file:
    video_bytes = video_file.read()
st.video(video_bytes)