import streamlit as st
from transformers import pipeline

# Hugging Face API token from st.secrets (only needed for gated or private models)
# hf_api_token = st.secrets["HUGGINGFACE_API_TOKEN"]

# Model to load from the Hugging Face Hub
model_name = "TinyLlama/TinyLlama_v1.1"

# Create a text generation pipeline
# generator = pipeline("text-generation", model=model_name, token=hf_api_token)
generator = pipeline("text-generation", model=model_name)

# Streamlit UI
st.title("TinyLlama_v1.1")
#st.write(hf_api_token)

# Input prompt
prompt = st.text_input("Enter your prompt:", value="Explain the significance of the theory of relativity.")

# Generate text on button click
if st.button("Generate Text"):
    # Generate text using the pipeline (max_length counts prompt + generated tokens)
    output = generator(prompt, max_length=100, num_return_sequences=1)
    # Display the generated text
    st.write(output[0]['generated_text'])
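# Note: as written, the pipeline above is rebuilt on every Streamlit rerun.
# A minimal sketch of the usual caching pattern (assuming Streamlit >= 1.18,
# which provides st.cache_resource); this is a sketch, not part of the original app:
#
# @st.cache_resource
# def load_generator(name: str):
#     # Build the text-generation pipeline once and reuse it across reruns
#     return pipeline("text-generation", model=name)
#
# generator = load_generator(model_name)
#
# The app can then be launched with `streamlit run app.py` (assuming this
# file is saved as app.py).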