File size: 936 Bytes
bd2b8d3
 
 
 
 
 
 
e460bce
bd2b8d3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Hugging Face API token from Streamlit's secrets store (.streamlit/secrets.toml).
# SECURITY: never render this value in the UI — it is a credential.
hf_api_token = st.secrets["HUGGINGFACE_API_TOKEN"]

MODEL_NAME = "meta-llama/Meta-Llama-3.1-8B"


@st.cache_resource(show_spinner="Loading model…")
def _load_generator(model_name: str):
    """Load the tokenizer and model once and return a text-generation pipeline.

    Cached with ``st.cache_resource`` so Streamlit reruns (every widget
    interaction re-executes the script) do not reload multi-GB weights.
    """
    # Original code stored a pipeline in a variable called `tokenizer` and
    # never loaded the model, so `model` was an undefined name. Load both
    # explicitly, authenticating with the gated-repo token.
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_api_token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_api_token)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)


generator = _load_generator(MODEL_NAME)

# Streamlit UI — title now matches the model actually served (8B, not 405B).
st.title("LLaMA 3.1-8B Model Text Generation")
# NOTE(review): the original `st.write(hf_api_token)` leaked the API secret to
# every page viewer; removed.

# Input prompt
prompt = st.text_input(
    "Enter your prompt:",
    value="Explain the significance of the theory of relativity.",
)

# Generate text on button click
if st.button("Generate Text"):
    # Generate text using the pipeline and display the single returned sequence.
    output = generator(prompt, max_length=100, num_return_sequences=1)
    st.write(output[0]["generated_text"])