import torch
import yaml
from typing import Dict, Any

from transformers import AutoTokenizer, AutoModelForSequenceClassification, ModernBertConfig

# models.py (containing ModernBertForSentiment) will be loaded from the Hub due to trust_remote_code=True.
class SentimentInference:
    def __init__(self, config_path: str = "config.yaml"):
        """Load configuration and initialize model and tokenizer from Hugging Face Hub."""
        with open(config_path, 'r') as f:
            config_data = yaml.safe_load(f)

        model_yaml_cfg = config_data.get('model', {})
        inference_yaml_cfg = config_data.get('inference', {})

        model_hf_repo_id = model_yaml_cfg.get('name_or_path')
        if not model_hf_repo_id:
            raise ValueError("model.name_or_path must be specified in config.yaml (e.g., 'username/model_name')")
        tokenizer_hf_repo_id = model_yaml_cfg.get('tokenizer_name_or_path', model_hf_repo_id)
        self.max_length = inference_yaml_cfg.get('max_length', model_yaml_cfg.get('max_length', 512))
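
        # An illustrative config.yaml layout that this loader assumes (hypothetical
        # example values, not taken from the original repository):
        #
        #   model:
        #     name_or_path: "username/modernbert-sentiment"             # placeholder repo id
        #     tokenizer_name_or_path: "username/modernbert-sentiment"   # optional; defaults to name_or_path
        #     pooling_strategy: "mean"
        #     num_weighted_layers: 4
        #     dropout: 0.1
        #     num_labels: 1
        #     max_length: 512
        #   inference:
        #     max_length: 512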
        print(f"Loading tokenizer from: {tokenizer_hf_repo_id}")
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_hf_repo_id)

        print(f"Loading base ModernBertConfig from: {model_hf_repo_id}")
        # Load the config that was uploaded with the model (config.json in the HF repo).
        # It already defines the ModernBERT architecture; we augment it below with the
        # custom parameters that ModernBertForSentiment's __init__ expects.
        loaded_config = ModernBertConfig.from_pretrained(model_hf_repo_id)

        # These values should reflect how the model was trained and its custom head.
        loaded_config.pooling_strategy = model_yaml_cfg.get('pooling_strategy', 'mean')  # 'mean' matches models.py
        loaded_config.num_weighted_layers = model_yaml_cfg.get('num_weighted_layers', 4)
        loaded_config.classifier_dropout = model_yaml_cfg.get('dropout')  # may be None if absent from the YAML
        # num_labels should ideally already be set in the uploaded config.json, but it can
        # be overridden here. For binary sentiment with a single logit output, num_labels is 1.
        loaded_config.num_labels = model_yaml_cfg.get('num_labels', 1)
        # The loss_function is not needed in the forward pass at inference time, so it is
        # omitted here to keep the loader simple. If ModernBertForSentiment.__init__
        # requires it, it must be provided:
        # loaded_config.loss_function = model_yaml_cfg.get('loss_function', {'name': '...', 'params': {}})

        print(f"Instantiating and loading model weights for {model_hf_repo_id}...")
        # trust_remote_code=True allows loading models.py (containing ModernBertForSentiment)
        # from the Hugging Face model repository.
        self.model = AutoModelForSequenceClassification.from_pretrained(
            model_hf_repo_id,
            config=loaded_config,  # pass the augmented config
            trust_remote_code=True,
        )
        self.model.eval()
        print(f"Model {model_hf_repo_id} loaded successfully from Hugging Face Hub.")
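        # Note: the model stays on CPU here (matching the Space's CPU runtime); move the
        # model and inputs with .to(device) if you deploy on GPU.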

    def predict(self, text: str) -> Dict[str, Any]:
        inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=self.max_length, padding=True)
        with torch.no_grad():
            outputs = self.model(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])
        logits = outputs.get("logits")  # use .get for safety across output types
        if logits is None:
            raise ValueError("Model output did not contain 'logits'. Check the model's forward pass.")
        prob = torch.sigmoid(logits).item()  # single-logit binary head: P(positive)
        sentiment = "positive" if prob > 0.5 else "negative"
        # Report confidence in the predicted class rather than raw P(positive),
        # so negative predictions also get a high confidence score.
        confidence = prob if prob > 0.5 else 1.0 - prob
        return {"sentiment": sentiment, "confidence": confidence}
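
A minimal usage sketch (hypothetical: it assumes a config.yaml like the one outlined in the comments above, with a real Hub repo id in model.name_or_path):

if __name__ == "__main__":
    inference = SentimentInference(config_path="config.yaml")
    result = inference.predict("This movie was absolutely wonderful!")
    print(result)  # e.g. {'sentiment': 'positive', 'confidence': 0.97}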