Update app.py
app.py CHANGED
@@ -2,17 +2,18 @@ import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
 # Load the Hugging Face API token from st.secrets
-hf_api_token = st.secrets["HUGGINGFACE_API_TOKEN"]
+# hf_api_token = st.secrets["HUGGINGFACE_API_TOKEN"]
 
 # Load the model and tokenizer using the API token
-model_name = "
+model_name = "TinyLlama/TinyLlama_v1.1"
 
 # Create a text generation pipeline
-generator = pipeline("text-generation", model=model_name, token=hf_api_token)
+# generator = pipeline("text-generation", model=model_name, token=hf_api_token)
+generator = pipeline("text-generation", model=model_name)
 
 # Streamlit UI
-st.title("
-st.write(hf_api_token)
+st.title("TinyLlama_v1.1")
+#st.write(hf_api_token)
 
 # Input prompt
 prompt = st.text_input("Enter your prompt:", value="Explain the significance of the theory of relativity.")
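For reference, the new generator can be exercised on its own outside Streamlit. This is a minimal sketch, not part of the commit: the max_new_tokens value and the print call are illustrative assumptions; only the model id and the example prompt come from the diff above.

from transformers import pipeline

# Public TinyLlama checkpoint, so no Hugging Face API token is required
# (this mirrors the change in the diff above).
generator = pipeline("text-generation", model="TinyLlama/TinyLlama_v1.1")

prompt = "Explain the significance of the theory of relativity."
# max_new_tokens is an illustrative value, not taken from the commit.
outputs = generator(prompt, max_new_tokens=100)
print(outputs[0]["generated_text"])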