Commit 4f1a07f
Terry Zhang committed
Parent(s): af86903

update results and text

Files changed:
- results.txt +45 -0
- tasks/text.py +5 -7
results.txt CHANGED

@@ -42,3 +42,48 @@
         "test_seed": 42
     }
 }
+
+{
+    "username": "theterryzhang",
+    "space_url": "https://huggingface.co/spaces/theterryzhang/submission-template",
+    "submission_timestamp": "2025-01-31T20:05:49.060404",
+    "model_description": "Mixture of expert classifier with DistilBERT Embeddings",
+    "accuracy": 0.64,
+    "energy_consumed_wh": 0.5265790008821295,
+    "emissions_gco2eq": 0.19437841190095229,
+    "emissions_data": {
+        "run_id": "9d207fd5-6169-46d5-9e34-f575d7276e67",
+        "duration": 10.614267765000022,
+        "emissions": 0.0001943784119009523,
+        "emissions_rate": 0.000018313007308559322,
+        "cpu_power": 105,
+        "gpu_power": 67.91747982828848,
+        "ram_power": 5.74672794342041,
+        "cpu_energy": 0.0003094673678833363,
+        "gpu_energy": 0.00020017571569600108,
+        "ram_energy": 0.000016935917302792142,
+        "energy_consumed": 0.0005265790008821295,
+        "country_name": "United States",
+        "country_iso_code": "USA",
+        "region": "virginia",
+        "cloud_provider": "",
+        "cloud_region": "",
+        "os": "Linux-5.10.230-223.885.amzn2.x86_64-x86_64-with-glibc2.36",
+        "python_version": "3.9.21",
+        "codecarbon_version": "2.8.3",
+        "cpu_count": 4,
+        "cpu_model": "Intel(R) Xeon(R) Platinum 8259CL CPU @ 2.50GHz",
+        "gpu_count": 1,
+        "gpu_model": "1 x Tesla T4",
+        "ram_total_size": 15.324607849121094,
+        "tracking_mode": "machine",
+        "on_cloud": "N",
+        "pue": 1
+    },
+    "api_route": "/text",
+    "dataset_config": {
+        "dataset_name": "QuotaClimat/frugalaichallenge-text-train",
+        "test_size": 0.2,
+        "test_seed": 42
+    }
+}
tasks/text.py CHANGED

@@ -22,13 +22,13 @@ router = APIRouter()
 DESCRIPTION = "Random Baseline"
 ROUTE = "/text"
 
-
+MODEL_DESCRIPTIONS = {
     "baseline": "random baseline", # Baseline
     "tfidf_xgb": "TF-IDF vectorizer and XGBoost classifier", # Submitted
     "bert_base_pruned": "Pruned BERT base model", # Submitted
-    'climate_bert_pruned': "Fine-tuned and pruned DistilRoBERTa pre-trained on climate texts", # Not working
-    "sbert_distilroberta": "Fine-tuned sentence transformer DistilRoBERTa",
-    "embedding_moe": "Mixture of expert classifier with DistilBERT Embeddings"
+    # 'climate_bert_pruned': "Fine-tuned and pruned DistilRoBERTa pre-trained on climate texts", # Not working
+    "sbert_distilroberta": "Fine-tuned sentence transformer DistilRoBERTa", # working, not submitted
+    "embedding_moe": "Mixture of expert classifier with DistilBERT Embeddings" # working, not submitted
 }
 
 
@@ -93,7 +93,6 @@ def bert_classifier(test_dataset: dict, model: str):
     else:
         raise(ValueError)
 
-    # Use CUDA if available
     device, _, _ = get_backend()
 
     model = model.to(device)
@@ -119,7 +118,6 @@ def bert_classifier(test_dataset: dict, model: str):
 def moe_classifier(test_dataset: dict, model: str):
     print("Starting MoE run")
 
-    # Use CUDA if available
     device, _, _ = get_backend()
 
     texts = test_dataset["quote"]
@@ -228,7 +226,7 @@ async def evaluate_text(request: TextEvaluationRequest,
         "username": username,
         "space_url": space_url,
         "submission_timestamp": datetime.now().isoformat(),
-        "model_description":
+        "model_description": MODEL_DESCRIPTIONS[model],
         "accuracy": float(accuracy),
         "energy_consumed_wh": emissions_data.energy_consumed * 1000,
         "emissions_gco2eq": emissions_data.emissions * 1000,