import gradio as gr
import torch
from transformers import BertTokenizerFast, BertForTokenClassification

# --- Load model and tokenizer once at startup ---
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "AventIQ-AI/bert-named-entity-recognition"
model = BertForTokenClassification.from_pretrained(model_name).to(device)
tokenizer = BertTokenizerFast.from_pretrained(model_name)

# CoNLL-2003 style BIO tag set; index must match the model's head ordering.
label_list = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"]


def predict_entities(text):
    """Run token-level NER on *text* and return an HTML report.

    The report contains the input text with recognized entities wrapped in
    <mark> tags, followed by a two-column table (Entity | Label).

    Args:
        text: Raw input string from the Gradio textbox.

    Returns:
        An HTML fragment (str) for rendering in a gr.HTML output.
    """
    tokens = tokenizer(text, return_tensors="pt", truncation=True)
    tokens = {key: val.to(device) for key, val in tokens.items()}  # move to model device

    with torch.no_grad():
        outputs = model(**tokens)

    logits = outputs.logits
    predictions = torch.argmax(logits, dim=2)  # highest-probability label per token

    tokens_list = tokenizer.convert_ids_to_tokens(tokens["input_ids"][0])
    predicted_labels = [label_list[pred] for pred in predictions[0].cpu().numpy()]

    # Merge WordPiece subwords ("##xyz") back into whole words; the merged
    # word keeps the label of its first subword.
    final_tokens = []
    final_labels = []
    for token, label in zip(tokens_list, predicted_labels):
        if token.startswith("##"):
            if final_tokens:  # guard: a "##" piece should never be first
                final_tokens[-1] += token[2:]
        else:
            final_tokens.append(token)
            final_labels.append(label)

    table_rows = []
    highlighted_text = text
    for token, label in zip(final_tokens, final_labels):
        # Skip special tokens, and compare the LABEL (not the token text)
        # against the "O" (outside) tag — a literal token "O" is not a tag.
        if token in ("[CLS]", "[SEP]") or label == "O":
            continue
        table_rows.append(f"<tr><td>{token}</td><td>{label}</td></tr>")
        # Highlight only the first remaining occurrence to keep positions aligned.
        highlighted_text = highlighted_text.replace(token, f"<mark>{token}</mark>", 1)

    table_data = (
        "<table border='1' cellpadding='5'>"
        "<tr><th>Entity</th><th>Label</th></tr>"
        + "".join(table_rows)
        + "</table>"
    )
    return (
        "<h3>Highlighted Text:</h3>"
        f"<p>{highlighted_text}</p>"
        "<h3>Entities Table:</h3>"
        f"{table_data}"
    )


# --- Gradio interface ---
iface = gr.Interface(
    fn=predict_entities,
    inputs=gr.Textbox(lines=5, placeholder="Enter text for entity recognition..."),
    outputs=gr.HTML(),
    title="BERT Named Entity Recognition",
    description=(
        "Identify named entities (e.g., names, locations, organizations) in text "
        "using the BERT model fine-tuned by AventIQ. The results are displayed "
        "with highlighted entities and a structured table."
    ),
    live=True,
)

# Launch the app
if __name__ == "__main__":
    iface.launch()