import gradio as gr
import os
import torch
import numpy as np
import random
from huggingface_hub import login, HfFolder
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from scipy.special import softmax
import logging

# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')

# Set a seed for reproducibility
seed = 42
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)

# Login to Hugging Face
token = os.getenv("hf_token")
HfFolder.save_token(token)
login(token)

# Model paths and quality mapping
model_paths = [
    'karths/binary_classification_train_test',
    'karths/binary_classification_train_requirement',
    "karths/binary_classification_train_process",
    "karths/binary_classification_train_infrastructure",
    "karths/binary_classification_train_documentation",
    "karths/binary_classification_train_design",
    "karths/binary_classification_train_defect",
    "karths/binary_classification_train_code",
    "karths/binary_classification_train_build",
    "karths/binary_classification_train_automation",
    "karths/binary_classification_train_people",
    "karths/binary_classification_train_architecture",
]

quality_mapping = {
    'binary_classification_train_test': 'Test',
    'binary_classification_train_requirement': 'Requirement',
    'binary_classification_train_process': 'Process',
    'binary_classification_train_infrastructure': 'Infrastructure',
    'binary_classification_train_documentation': 'Documentation',
    'binary_classification_train_design': 'Design',
    'binary_classification_train_defect': 'Defect',
    'binary_classification_train_code': 'Code',
    'binary_classification_train_build': 'Build',
    'binary_classification_train_automation': 'Automation',
    'binary_classification_train_people': 'People',
    'binary_classification_train_architecture': 'Architecture'
}

# Pre-load models and tokenizer
tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")
models = {path: AutoModelForSequenceClassification.from_pretrained(path) for path in model_paths}


def get_quality_name(model_name):
    return quality_mapping.get(model_name.split('/')[-1], "Unknown Quality")


def model_prediction(model, text, device):
    model.to(device)
    model.eval()

    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
    inputs = {k: v.to(device) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)

    logits = outputs.logits
    probs = softmax(logits.cpu().numpy(), axis=1)
    avg_prob = np.mean(probs[:, 1])

    return avg_prob


def main_interface(text):
    if not text.strip():
        return "