Spaces:
Sleeping
Sleeping
Update src/streamlit_app.py
Browse files- src/streamlit_app.py +16 -2
src/streamlit_app.py
CHANGED
@@ -1,5 +1,7 @@
|
|
1 |
import os
|
2 |
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
|
|
|
|
|
3 |
import json
|
4 |
import requests
|
5 |
import streamlit as st
|
@@ -22,8 +24,20 @@ st.write("🔑 토큰 있음:", os.environ.get("HUGGINGFACE_TOKEN") is not None)
|
|
22 |
def load_model():
|
23 |
token = os.environ.get("HUGGINGFACE_TOKEN")
|
24 |
model_id = "google/gemma-2-2b-it"
|
25 |
-
|
26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
return pipeline("text-generation", model=model, tokenizer=tokenizer)
|
28 |
|
29 |
llm = load_model()
|
|
|
1 |
import os
|
2 |
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
|
3 |
+
os.environ["HF_HOME"] = "/tmp/hf_cache"
|
4 |
+
os.environ["HF_DATASETS_CACHE"] = "/tmp/hf_cache"
|
5 |
import json
|
6 |
import requests
|
7 |
import streamlit as st
|
|
|
def load_model():
    """Build a text-generation pipeline for the Gemma-2 2B instruct model.

    Reads the Hugging Face access token from the HUGGINGFACE_TOKEN
    environment variable (Gemma is a gated model, so a token is required)
    and forces all downloads into /tmp/hf_cache, the only writable
    directory on Hugging Face Spaces.

    Returns:
        transformers.Pipeline: a "text-generation" pipeline wrapping the
        downloaded model and tokenizer.
    """
    token = os.environ.get("HUGGINGFACE_TOKEN")
    model_id = "google/gemma-2-2b-it"
    cache_dir = "/tmp/hf_cache"  # only writable directory on Hugging Face Spaces

    # `use_auth_token` is deprecated in transformers (>= 4.34) and scheduled
    # for removal; `token` is the supported replacement.
    tokenizer = AutoTokenizer.from_pretrained(
        model_id,
        token=token,
        cache_dir=cache_dir,
    )

    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        token=token,
        cache_dir=cache_dir,
    )

    return pipeline("text-generation", model=model, tokenizer=tokenizer)

# Loaded once at module import; every Streamlit rerun re-executes this file,
# so the pipeline is rebuilt unless the file (not visible here) wraps
# load_model with a caching decorator such as @st.cache_resource.
llm = load_model()