# NLI / app.py — Multilingual NLI Streamlit demo (commit e90cc8b, by rahideer)
import streamlit as st
import zipfile
import os
import pandas as pd
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
# Paths (relative to the process working directory)
ZIP_FILE = "xnli-multilingual-nli-dataset.zip"  # source archive with one CSV per language
EXTRACT_DIR = "extracted_data"  # destination directory the archive is unpacked into
@st.cache_data
def extract_and_load():
    """Extract the XNLI zip archive (first run only) and list its CSV files.

    Returns:
        list[str]: sorted CSV filenames found directly in EXTRACT_DIR.

    Raises:
        FileNotFoundError: if neither EXTRACT_DIR nor ZIP_FILE exists.
    """
    if not os.path.exists(EXTRACT_DIR):
        # Extract once; subsequent runs (and cache hits) reuse the directory.
        with zipfile.ZipFile(ZIP_FILE, "r") as zip_ref:
            zip_ref.extractall(EXTRACT_DIR)
    # Sort so the selectbox ordering is deterministic across reruns
    # (os.listdir order is arbitrary), and skip any directories that
    # happen to end in ".csv".
    csv_files = sorted(
        f
        for f in os.listdir(EXTRACT_DIR)
        if f.endswith(".csv") and os.path.isfile(os.path.join(EXTRACT_DIR, f))
    )
    return csv_files
# Load model and tokenizer
@st.cache_resource
def load_model():
    """Build and cache the multilingual NLI text-classification pipeline."""
    checkpoint = "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli"
    return pipeline(
        "text-classification",
        model=AutoModelForSequenceClassification.from_pretrained(checkpoint),
        tokenizer=AutoTokenizer.from_pretrained(checkpoint),
    )
# --- Page setup ---
st.set_page_config(page_title="Multilingual NLI App", layout="centered")
st.title("🌍 Multilingual NLI (Natural Language Inference) Explorer")
st.markdown("Upload premise & hypothesis pairs or use the dataset to explore entailment, contradiction, or neutrality.")

nli_pipeline = load_model()
csv_files = extract_and_load()

# Guard: an empty archive would otherwise crash the selectbox/read_csv flow.
if not csv_files:
    st.error("No CSV files found in the extracted dataset.")
    st.stop()

selected_csv = st.selectbox("Choose a language CSV file:", csv_files)
df = pd.read_csv(os.path.join(EXTRACT_DIR, selected_csv)).dropna()

# Guard: DataFrame.sample(5) raises ValueError when fewer than 5 rows
# survive dropna(); sample whatever is available instead.
sample_df = df.sample(min(5, len(df))).reset_index(drop=True)

st.subheader("Sample from Dataset")
st.dataframe(sample_df[['premise', 'hypothesis', 'label']])

st.subheader("🔍 Run Inference")
index = st.number_input("Select Sample Index", min_value=0, max_value=len(sample_df) - 1, value=0, step=1)
premise = sample_df.loc[index, 'premise']
hypothesis = sample_df.loc[index, 'hypothesis']
st.markdown(f"**Premise:** {premise}")
st.markdown(f"**Hypothesis:** {hypothesis}")

if st.button("Run NLI Prediction"):
    # Pass the pair as text/text_pair so the tokenizer inserts its own
    # separator/special tokens, instead of hard-coding "</s>" (which is
    # only correct for some tokenizers and is tokenized as plain text here).
    output = nli_pipeline({"text": premise, "text_pair": hypothesis})
    # The pipeline may return a bare dict for a single pair or a one-element list.
    top = output[0] if isinstance(output, list) else output
    st.success(f"**Prediction:** {top['label']} (Score: {top['score']:.2f})")