from transformers import AutoTokenizer, AutoModelForSequenceClassification
from fastapi import APIRouter
from datetime import datetime
from datasets import load_dataset
from sklearn.metrics import accuracy_score
import torch
import numpy as np
from .utils.evaluation import TextEvaluationRequest
from .utils.emissions import tracker, clean_emissions_data, get_space_info
router = APIRouter()

DESCRIPTION = "FrugalDisinfoHunter Model"
ROUTE = "/text"


@router.post(ROUTE, tags=["Text Task"], description=DESCRIPTION)
async def evaluate_text(request: TextEvaluationRequest):
"""
Evaluate text classification for climate disinformation detection.
"""
# Get space info
username, space_url = get_space_info()
# Define the label mapping
LABEL_MAPPING = {
"0_not_relevant": 0,
"1_not_happening": 1,
"2_not_human": 2,
"3_not_bad": 3,
"4_solutions_harmful_unnecessary": 4,
"5_science_unreliable": 5,
"6_proponents_biased": 6,
"7_fossil_fuels_needed": 7
}
# Load and prepare the dataset
dataset = load_dataset(request.dataset_name)
# Convert string labels to integers
dataset = dataset.map(lambda x: {"label": LABEL_MAPPING[x["label"]]})
# Split dataset
train_test = dataset["train"].train_test_split(test_size=request.test_size, seed=request.test_seed)
test_dataset = train_test["test"]
# Start tracking emissions
tracker.start()
tracker.start_task("inference")
    try:
        # Model configuration
        model_name = "google/mobilebert-uncased"  # Base model
        local_weights = "model/model.pt"  # Path to our trained weights
        BATCH_SIZE = 32
        MAX_LENGTH = 256  # Increased from 128

        # Initialize tokenizer and model
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSequenceClassification.from_pretrained(
            model_name,
            num_labels=8,
            problem_type="single_label_classification"
        )

        # Load our trained weights
        try:
            state_dict = torch.load(local_weights, map_location="cpu")
            model.load_state_dict(state_dict)
        except Exception as e:
            print(f"Error loading weights: {e}")
            # Continue with the base model if the weights fail to load
            pass

        # Move model to the appropriate device
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = model.to(device)
        model.eval()  # Set to evaluation mode
        # Get test texts and process in batches
        test_texts = test_dataset["quote"]
        predictions = []

        # Process in batches
        for i in range(0, len(test_texts), BATCH_SIZE):
            # Clear CUDA cache if using GPU
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            batch_texts = test_texts[i:i + BATCH_SIZE]

            # Tokenize with padding and attention masks
            inputs = tokenizer(
                batch_texts,
                padding=True,
                truncation=True,
                max_length=MAX_LENGTH,
                return_tensors="pt"
            )

            # Move inputs to device
            inputs = {k: v.to(device) for k, v in inputs.items()}

            # Run inference with no gradient computation
            with torch.no_grad():
                outputs = model(**inputs)
                batch_preds = torch.argmax(outputs.logits, dim=1)
                predictions.extend(batch_preds.cpu().numpy())
        # Get true labels
        true_labels = test_dataset["label"]

        # Stop tracking emissions
        emissions_data = tracker.stop_task()

        # Calculate accuracy
        accuracy = accuracy_score(true_labels, predictions)

        # Prepare results dictionary
        results = {
            "username": username,
            "space_url": space_url,
            "submission_timestamp": datetime.now().isoformat(),
            "model_description": DESCRIPTION,
            "accuracy": float(accuracy),
            "energy_consumed_wh": emissions_data.energy_consumed * 1000,  # convert to watt-hours
            "emissions_gco2eq": emissions_data.emissions * 1000,  # convert to grams of CO2eq
            "emissions_data": clean_emissions_data(emissions_data),
            "api_route": ROUTE,
            "dataset_config": {
                "dataset_name": request.dataset_name,
                "test_size": request.test_size,
                "test_seed": request.test_seed
            }
        }

        return results
    except Exception as e:
        # Stop tracking in case of error
        tracker.stop_task()
        raise e
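

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the evaluation route above).
# It shows one way to exercise the /text endpoint locally with FastAPI's
# TestClient. The wrapper app, the placeholder dataset name, and the request
# field values are assumptions for demonstration; TextEvaluationRequest in
# .utils.evaluation may define additional fields or defaults. Because this
# module uses relative imports, run it as a module within its package
# (e.g. `python -m <package>.<module>`).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from fastapi import FastAPI
    from fastapi.testclient import TestClient

    # Build a throwaway app that mounts this router, then call the route.
    app = FastAPI()
    app.include_router(router)
    client = TestClient(app)

    response = client.post(
        ROUTE,
        json={
            "dataset_name": "<your-dataset-name>",  # placeholder: dataset must have "quote" and "label" columns
            "test_size": 0.2,  # assumed split fraction
            "test_seed": 42,   # assumed seed
        },
    )
    print(response.json())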