"""Gradio demo: BERT named-entity recognition with WordPiece subword merging."""

import gradio as gr
import torch
from transformers import BertTokenizerFast, BertForTokenClassification

# Load model and tokenizer once at import time.
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "AventIQ-AI/bert-named-entity-recognition"
model = BertForTokenClassification.from_pretrained(model_name).to(device)
tokenizer = BertTokenizerFast.from_pretrained(model_name)

# Label list: index corresponds to the model's class id (CoNLL-2003 style tags).
label_list = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG",
              "B-LOC", "I-LOC", "B-MISC", "I-MISC"]


def predict_entities(text):
    """Run NER over *text* and return a Markdown table of detected entities.

    Args:
        text: Raw input string to tag.

    Returns:
        A Markdown-formatted table ("| Entity | Label |" rows) listing every
        token the model tagged with a non-"O" label.
    """
    encoded = tokenizer(text, return_tensors="pt", truncation=True)
    encoded = {key: val.to(device) for key, val in encoded.items()}

    with torch.no_grad():
        outputs = model(**encoded)
    logits = outputs.logits
    # Highest-probability class id per token position.
    predictions = torch.argmax(logits, dim=2)

    tokens_list = tokenizer.convert_ids_to_tokens(encoded["input_ids"][0])
    predicted_labels = [label_list[pred] for pred in predictions[0].cpu().numpy()]

    # Merge WordPiece continuation pieces ("##xyz") back onto the preceding
    # token; a merged word keeps the label of its first subword.
    final_tokens = []
    final_labels = []
    for token, label in zip(tokens_list, predicted_labels):
        if token.startswith("##"):
            final_tokens[-1] += token[2:]
        else:
            final_tokens.append(token)
            final_labels.append(label)

    # BUG FIX: the original tested `token not in ["[CLS]", "[SEP]", "O"]`,
    # i.e. it compared the *token text* against the label "O", so ordinary
    # non-entity words were kept in the output. Filter special tokens by
    # token text and non-entities by *label* instead.
    table_rows = ["| Entity | Label |", "|---|---|"]
    for token, label in zip(final_tokens, final_labels):
        if token in ("[CLS]", "[SEP]") or label == "O":
            continue
        table_rows.append(f"| {token} | {label} |")

    # NOTE(review): the original source is truncated mid-f-string here; it
    # also initialized `highlighted_text = text`, suggesting an inline-
    # highlighting feature whose code is not visible. Returning the Markdown
    # table is the reconstructed behavior — TODO confirm against the original
    # file's ending (return statement and Gradio UI wiring were cut off).
    return "\n".join(table_rows)