Update app.py
app.py CHANGED
@@ -1,5 +1,16 @@
+import os
 import streamlit as st
 from langchain_community.llms import HuggingFaceHub
+from dotenv import load_dotenv
+
+# Load .env if running locally
+load_dotenv()
+
+# Set your Hugging Face API token (must be set in Hugging Face Spaces Secrets for online use)
+HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
+if not HUGGINGFACE_API_TOKEN:
+    st.error("⚠️ Hugging Face API token not found. Please add it as a secret with name 'HUGGINGFACEHUB_API_TOKEN'.")
+    st.stop()
 
 # Models for each task
 SUMMARY_MODEL = "google/flan-t5-small"
@@ -11,7 +22,8 @@ def get_llm(model_id):
     return HuggingFaceHub(
         repo_id=model_id,
         model_kwargs={"temperature": 0.5, "max_new_tokens": 150},
-        task="text2text-generation"
+        task="text2text-generation",
+        huggingfacehub_api_token=HUGGINGFACE_API_TOKEN
     )
 
 # Streamlit app UI
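
For context, here is a minimal sketch of how the top of app.py and the patched get_llm read after this commit, plus an illustrative call. Only the lines shown in the diff are taken from the commit; the final usage lines and the prompt text are assumptions for demonstration. Locally the token is expected in a .env line of the form HUGGINGFACEHUB_API_TOKEN=hf_..., while on Spaces it is added as a repository secret under the same name.

# Sketch: the resulting top of app.py after this commit (rest of the app assumed unchanged).
import os

import streamlit as st
from dotenv import load_dotenv
from langchain_community.llms import HuggingFaceHub

# Load .env if running locally; on Spaces the secret is injected as an environment variable.
load_dotenv()

HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not HUGGINGFACE_API_TOKEN:
    st.error("⚠️ Hugging Face API token not found. Please add it as a secret with name 'HUGGINGFACEHUB_API_TOKEN'.")
    st.stop()

# Models for each task
SUMMARY_MODEL = "google/flan-t5-small"

def get_llm(model_id):
    # The token is passed explicitly so the client does not rely on ambient configuration.
    return HuggingFaceHub(
        repo_id=model_id,
        model_kwargs={"temperature": 0.5, "max_new_tokens": 150},
        task="text2text-generation",
        huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,
    )

# Illustrative usage (not part of the commit): summarize a short passage with the small FLAN-T5 model.
llm = get_llm(SUMMARY_MODEL)
summary = llm.invoke("Summarize: Streamlit apps on Hugging Face Spaces read API tokens from repository secrets at runtime.")
st.write(summary)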