Spaces:
Running
Running
kovacsvi
committed on
Commit
·
690a8d2
1
Parent(s):
04b374c
removed low_cpu_memory_usage
Browse files
- interfaces/cap.py +1 -1
- interfaces/cap_media_demo.py +1 -1
- interfaces/cap_minor.py +1 -1
- interfaces/cap_minor_media.py +1 -1
- interfaces/emotion.py +1 -1
- interfaces/emotion9.py +1 -1
- interfaces/illframes.py +2 -2
- interfaces/manifesto.py +1 -1
- interfaces/ontolisst.py +1 -1
- interfaces/sentiment.py +1 -1
- utils.py +1 -1
interfaces/cap.py
CHANGED
@@ -86,7 +86,7 @@ def build_huggingface_path(language: str, domain: str):
|
|
86 |
#@spaces.GPU
|
87 |
def predict(text, model_id, tokenizer_id):
|
88 |
device = torch.device("cpu")
|
89 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
90 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
91 |
|
92 |
inputs = tokenizer(text,
|
|
|
86 |
#@spaces.GPU
|
87 |
def predict(text, model_id, tokenizer_id):
|
88 |
device = torch.device("cpu")
|
89 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN).to(device)
|
90 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
91 |
|
92 |
inputs = tokenizer(text,
|
interfaces/cap_media_demo.py
CHANGED
@@ -35,7 +35,7 @@ def build_huggingface_path(language: str, domain: str):
|
|
35 |
|
36 |
def predict(text, model_id, tokenizer_id):
|
37 |
device = torch.device("cpu")
|
38 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
39 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
40 |
|
41 |
inputs = tokenizer(text,
|
|
|
35 |
|
36 |
def predict(text, model_id, tokenizer_id):
|
37 |
device = torch.device("cpu")
|
38 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN)
|
39 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
40 |
|
41 |
inputs = tokenizer(text,
|
interfaces/cap_minor.py
CHANGED
@@ -67,7 +67,7 @@ def build_huggingface_path(language: str, domain: str):
|
|
67 |
|
68 |
def predict(text, model_id, tokenizer_id):
|
69 |
device = torch.device("cpu")
|
70 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
71 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
72 |
|
73 |
inputs = tokenizer(text,
|
|
|
67 |
|
68 |
def predict(text, model_id, tokenizer_id):
|
69 |
device = torch.device("cpu")
|
70 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN)
|
71 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
72 |
|
73 |
inputs = tokenizer(text,
|
interfaces/cap_minor_media.py
CHANGED
@@ -150,7 +150,7 @@ def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
|
|
150 |
|
151 |
def predict_flat(text, model_id, tokenizer_id, HF_TOKEN=None):
|
152 |
device = torch.device("cpu")
|
153 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
154 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
155 |
|
156 |
inputs = tokenizer(text,
|
|
|
150 |
|
151 |
def predict_flat(text, model_id, tokenizer_id, HF_TOKEN=None):
|
152 |
device = torch.device("cpu")
|
153 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN).to(device)
|
154 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
155 |
|
156 |
inputs = tokenizer(text,
|
interfaces/emotion.py
CHANGED
@@ -27,7 +27,7 @@ def build_huggingface_path(language: str):
|
|
27 |
|
28 |
def predict(text, model_id, tokenizer_id):
|
29 |
device = torch.device("cpu")
|
30 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
31 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
32 |
model.to(device)
|
33 |
|
|
|
27 |
|
28 |
def predict(text, model_id, tokenizer_id):
|
29 |
device = torch.device("cpu")
|
30 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN)
|
31 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
32 |
model.to(device)
|
33 |
|
interfaces/emotion9.py
CHANGED
@@ -26,7 +26,7 @@ def build_huggingface_path(language: str):
|
|
26 |
|
27 |
def predict(text, model_id, tokenizer_id):
|
28 |
device = torch.device("cpu")
|
29 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
30 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
31 |
|
32 |
inputs = tokenizer(text,
|
|
|
26 |
|
27 |
def predict(text, model_id, tokenizer_id):
|
28 |
device = torch.device("cpu")
|
29 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, token=HF_TOKEN)
|
30 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
31 |
|
32 |
inputs = tokenizer(text,
|
interfaces/illframes.py
CHANGED
@@ -59,7 +59,7 @@ def build_huggingface_path(domain: str):
|
|
59 |
def predict(text, model_id, tokenizer_id, label_names):
|
60 |
device = torch.device("cpu")
|
61 |
try:
|
62 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
63 |
except:
|
64 |
disk_space = get_disk_space('/data/')
|
65 |
print("Disk Space Error:")
|
@@ -67,7 +67,7 @@ def predict(text, model_id, tokenizer_id, label_names):
|
|
67 |
print(f"{key}: {value}")
|
68 |
|
69 |
shutil.rmtree("/data")
|
70 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
71 |
|
72 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
73 |
|
|
|
59 |
def predict(text, model_id, tokenizer_id, label_names):
|
60 |
device = torch.device("cpu")
|
61 |
try:
|
62 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN)
|
63 |
except:
|
64 |
disk_space = get_disk_space('/data/')
|
65 |
print("Disk Space Error:")
|
|
|
67 |
print(f"{key}: {value}")
|
68 |
|
69 |
shutil.rmtree("/data")
|
70 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN, force_download=True)
|
71 |
|
72 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
73 |
|
interfaces/manifesto.py
CHANGED
@@ -26,7 +26,7 @@ def build_huggingface_path(language: str):
|
|
26 |
|
27 |
def predict(text, model_id, tokenizer_id):
|
28 |
device = torch.device("cpu")
|
29 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
30 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
31 |
|
32 |
inputs = tokenizer(text,
|
|
|
26 |
|
27 |
def predict(text, model_id, tokenizer_id):
|
28 |
device = torch.device("cpu")
|
29 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN)
|
30 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
31 |
|
32 |
inputs = tokenizer(text,
|
interfaces/ontolisst.py
CHANGED
@@ -44,7 +44,7 @@ def build_huggingface_path(language: str):
|
|
44 |
|
45 |
def predict(text, model_id, tokenizer_id):
|
46 |
device = torch.device("cpu")
|
47 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
48 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
49 |
|
50 |
# --- DEBUG ---
|
|
|
44 |
|
45 |
def predict(text, model_id, tokenizer_id):
|
46 |
device = torch.device("cpu")
|
47 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN)
|
48 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
49 |
|
50 |
# --- DEBUG ---
|
interfaces/sentiment.py
CHANGED
@@ -30,7 +30,7 @@ def build_huggingface_path(language: str):
|
|
30 |
|
31 |
def predict(text, model_id, tokenizer_id):
|
32 |
device = torch.device("cpu")
|
33 |
-
model = AutoModelForSequenceClassification.from_pretrained(model_id,
|
34 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
35 |
model.to(device)
|
36 |
|
|
|
30 |
|
31 |
def predict(text, model_id, tokenizer_id):
|
32 |
device = torch.device("cpu")
|
33 |
+
model = AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN)
|
34 |
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
|
35 |
model.to(device)
|
36 |
|
utils.py
CHANGED
@@ -54,7 +54,7 @@ tokenizers = ["xlm-roberta-large"]
|
|
54 |
|
55 |
def download_hf_models():
|
56 |
for model_id in models:
|
57 |
-
AutoModelForSequenceClassification.from_pretrained(model_id,
|
58 |
for tokenizer_id in tokenizers:
|
59 |
AutoTokenizer.from_pretrained(tokenizer_id)
|
60 |
|
|
|
54 |
|
55 |
def download_hf_models():
|
56 |
for model_id in models:
|
57 |
+
AutoModelForSequenceClassification.from_pretrained(model_id, device_map="auto", token=HF_TOKEN)
|
58 |
for tokenizer_id in tokenizers:
|
59 |
AutoTokenizer.from_pretrained(tokenizer_id)
|
60 |
|