yongyeol committed on
Commit
bee87b9
·
verified ·
1 Parent(s): 4f42502

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +16 -2
src/streamlit_app.py CHANGED
@@ -1,5 +1,7 @@
1
  import os
2
  os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
 
 
3
  import json
4
  import requests
5
  import streamlit as st
@@ -22,8 +24,20 @@ st.write("🔐 토큰 있음:", os.environ.get("HUGGINGFACE_TOKEN") is not None)
22
  def load_model():
23
  token = os.environ.get("HUGGINGFACE_TOKEN")
24
  model_id = "google/gemma-2-2b-it"
25
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=token)
26
- model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=token)
 
 
 
 
 
 
 
 
 
 
 
 
27
  return pipeline("text-generation", model=model, tokenizer=tokenizer)
28
 
29
  llm = load_model()
 
import os

# Point every Hugging Face cache at /tmp: on HF Spaces the default home
# directory is read-only, and /tmp is the writable location. These must be
# set before any HF library is imported, since the cache locations are
# resolved at import time (presumably transformers is imported further down
# in this file — confirm against the full source).
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
os.environ["HF_HOME"] = "/tmp/hf_cache"
os.environ["HF_DATASETS_CACHE"] = "/tmp/hf_cache"

import json
import requests
import streamlit as st
 
def load_model():
    """Build a text-generation pipeline for the gated Gemma 2 2B IT model.

    Reads the Hugging Face access token from the HUGGINGFACE_TOKEN
    environment variable (required because the model repo is gated) and
    downloads/caches all files under /tmp/hf_cache, the writable directory
    on Hugging Face Spaces.

    Returns:
        A transformers text-generation pipeline wrapping the model and
        tokenizer.
    """
    token = os.environ.get("HUGGINGFACE_TOKEN")
    model_id = "google/gemma-2-2b-it"
    cache_dir = "/tmp/hf_cache"  # directory allowed on Hugging Face Spaces

    # `token=` replaces the deprecated `use_auth_token=` keyword
    # (deprecated since transformers 4.32 and removed in later releases).
    tokenizer = AutoTokenizer.from_pretrained(
        model_id,
        token=token,
        cache_dir=cache_dir,
    )

    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        token=token,
        cache_dir=cache_dir,
    )

    return pipeline("text-generation", model=model, tokenizer=tokenizer)


llm = load_model()