import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-reranker-v2-m3")
model = AutoModelForSequenceClassification.from_pretrained("BAAI/bge-reranker-v2-m3")

# Define reranking function
def rerank(query, documents_text):
    documents = documents_text.strip().split('\n')
    pairs = [(query, doc) for doc in documents]
    inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        scores = model(**inputs).logits.squeeze(-1)
    results = sorted(zip(documents, scores.tolist()), key=lambda x: x[1], reverse=True)
    output = "\n\n".join([f"Score: {score:.4f}\n{doc}" for doc, score in results])
    return output

# Gradio Interface
iface = gr.Interface(
    fn=rerank,
    inputs=[
        gr.Textbox(label="Query", placeholder="Enter your search query", lines=1),
        gr.Textbox(label="Documents (one per line)", placeholder="Enter one document per line", lines=10)
    ],
    outputs=gr.Textbox(label="Reranked Output"),
    title="BGE Reranker v2 M3",
    description="Input a query and multiple documents. Returns reranked results with scores."
)

# Launch the interface (no share=True needed on Spaces)
iface.launch()
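
Once the Space is running, it can also be queried programmatically with gradio_client. The snippet below is a sketch: "user/bge-reranker-demo" is a placeholder Space id and the query/documents are made-up examples; "/predict" is the default endpoint name exposed by a single gr.Interface.

from gradio_client import Client

# Placeholder Space id -- replace with the actual "username/space-name"
client = Client("user/bge-reranker-demo")
result = client.predict(
    "What is a panda?",  # query (example input)
    "The giant panda is a bear endemic to China.\nParis is the capital of France.",  # documents, one per line (example input)
    api_name="/predict",  # default endpoint for a gr.Interface
)
print(result)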