Update app.py
app.py CHANGED

@@ -21,8 +21,7 @@ from langchain_community.llms import HuggingFaceHub
 import pandas as pd
 
 # Configuración del modelo
-
-model_name = "google/gemma-2-2b"
+
 TOKEN=os.getenv('HF_TOKEN')
 subprocess.run(["huggingface-cli", "login", "--token", TOKEN, "--add-to-git-credential"])
 ######
@@ -33,15 +32,17 @@ os.environ["HF_TOKEN"] = st.secrets["HF_TOKEN"]
 # Initialize tokenizer
 @st.cache_resource
 def load_model():
-
+    TOKEN=os.getenv('HF_TOKEN')
+    subprocess.run(["huggingface-cli", "login", "--token", TOKEN, "--add-to-git-credential"])
+    os.environ["HF_TOKEN"] = st.secrets["HF_TOKEN"]
     MODEL_NAME = "google/gemma-2b-it"
 
     model = AutoModelForCausalLM.from_pretrained(
-        MODEL_NAME
+        MODEL_NAME
         # quantization_config=nf4_config, # add config
         # torch_dtype=torch.bfloat16, # save memory using float16
         # low_cpu_mem_usage=True,
-        token= TOKEN
+        # token= TOKEN
     ).to("cuda")
 
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
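Net effect of the commit: the unused top-level model_name = "google/gemma-2-2b" assignment is dropped, the Hugging Face authentication steps (reading HF_TOKEN, running huggingface-cli login, setting the env var from st.secrets) move inside the cached load_model() function, and the explicit token= argument to from_pretrained is commented out. Below is a minimal sketch of how load_model() reads after this change; the imports, the English comments, and the return statement are assumptions, since the diff shows only a fragment of app.py.

import os
import subprocess

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource  # runs once per Space process; Streamlit reruns reuse the result
def load_model():
    # Authenticate against the Hugging Face Hub before the first download.
    TOKEN = os.getenv("HF_TOKEN")
    subprocess.run(["huggingface-cli", "login", "--token", TOKEN,
                    "--add-to-git-credential"])
    os.environ["HF_TOKEN"] = st.secrets["HF_TOKEN"]

    MODEL_NAME = "google/gemma-2b-it"

    # The quantization/dtype options remain commented out in the commit;
    # with token= also commented out, from_pretrained picks up the token
    # from the HF_TOKEN environment variable set above.
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to("cuda")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    return model, tokenizer  # assumed: the diff cuts off after the tokenizer line

With @st.cache_resource, the login subprocess and the model download execute only on the first call; subsequent Streamlit reruns fetch the cached pair, e.g. model, tokenizer = load_model().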