Update app.py
app.py CHANGED

@@ -27,7 +27,7 @@ TOKEN=os.getenv('HF_TOKEN')
 subprocess.run(["huggingface-cli", "login", "--token", TOKEN, "--add-to-git-credential"])
 ######
 # set this key as an environment variable
-os.environ["
+os.environ["HF_TOKEN"] = st.secrets["HF_TOKEN"]
 
 
 # Initialize tokenizer
@@ -37,11 +37,11 @@ def load_model():
     MODEL_NAME = "google/gemma-2b-it"
 
     model = AutoModelForCausalLM.from_pretrained(
-        MODEL_NAME
+        MODEL_NAME,\
         # quantization_config=nf4_config, # add config
         # torch_dtype=torch.bfloat16, # save memory using float16
         # low_cpu_mem_usage=True,
-        token=st.secrets["HF_TOKEN"],
+        # token=st.secrets["HF_TOKEN"],
     ).to("cuda")
 
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
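
Net effect of the commit: the previously truncated `os.environ["` line is completed so the Hugging Face token is exported from Streamlit secrets as the `HF_TOKEN` environment variable, and the explicit `token=` argument to `from_pretrained` is commented out, relying on `huggingface_hub` reading `HF_TOKEN` from the environment. Below is a minimal sketch of the resulting load path, not the full app.py; it assumes the streamlit/transformers imports already present elsewhere in the file, and the `nf4_config` shown is a hypothetical reconstruction of the commented-out quantization config, which this commit does not define.

import os

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Commit's approach: export the secret so huggingface_hub picks it up
# implicitly instead of passing token= to from_pretrained().
os.environ["HF_TOKEN"] = st.secrets["HF_TOKEN"]

MODEL_NAME = "google/gemma-2b-it"

# Hypothetical nf4_config matching the commented-out reference in the diff
# (an assumption, not part of the commit).
nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    # quantization_config=nf4_config,  # still disabled, as in the commit
).to("cuda")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

Two small notes on the committed code: the trailing backslash in `MODEL_NAME,\` is redundant inside parentheses (Python already continues lines within brackets), and if the `quantization_config` line were re-enabled, the `.to("cuda")` call would need to go, since bitsandbytes-quantized models are placed on the GPU by `from_pretrained` and raise an error on explicit `.to()` device moves.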