import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

# Load model and tokenizer
model_name = "cross-encoder/ms-marco-MiniLM-L-12-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # Set model to evaluation mode

# Function to get relevance score and relevant excerpt based on attention scores
def get_relevance_score_and_excerpt(query, paragraph):
    if not query.strip() or not paragraph.strip():
        return "Please provide both a query and a document paragraph.", ""
    # Tokenize the query and paragraph as one cross-encoder input pair
    # (return_tensors / truncation are standard settings assumed here)
    inputs = tokenizer(query, paragraph, return_tensors="pt", truncation=True)
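
    # --- Sketch of one plausible way to finish this function, assuming the
    # cross-encoder emits a single relevance logit and that last-layer
    # attention weights are a reasonable proxy for token importance; the
    # variable names and top-k choice below are illustrative, not from the source. ---
    with torch.no_grad():
        outputs = model(**inputs, output_attentions=True)

    # Single-logit cross-encoder: map the raw score into (0, 1)
    relevance_score = torch.sigmoid(outputs.logits[0]).item()

    # Average last-layer attention over heads and over "from" positions to get
    # how much attention each token receives overall
    attention = outputs.attentions[-1].mean(dim=1).mean(dim=1).squeeze(0)
    tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])

    # Keep the most-attended tokens (in original order) as the excerpt, skipping
    # special tokens; a fuller version would also use token_type_ids to restrict
    # the excerpt to the paragraph segment
    k = min(10, attention.size(0))
    top_indices = sorted(attention.topk(k).indices.tolist())
    excerpt_tokens = [tokens[i] for i in top_indices if tokens[i] not in tokenizer.all_special_tokens]
    excerpt = tokenizer.convert_tokens_to_string(excerpt_tokens)

    return f"{relevance_score:.4f}", excerpt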