import gradio as gr
import pandas as pd
from sentence_transformers import SentenceTransformer, util
# ---------- Load data & model (all CPU-friendly) ----------
faq_df = pd.read_csv("faqs.csv")
questions = faq_df["question"].tolist()
answers = faq_df["answer"].tolist()
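# Assumed schema for faqs.csv (file not shown here): one FAQ per row with at least
# the columns "question" and "answer", e.g.
#   question,answer
#   "Which spray protects hair from heat?","<answer text>"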
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
question_embeddings = model.encode(questions, convert_to_tensor=True, normalize_embeddings=True)
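# With normalize_embeddings=True every vector has unit length, so the cosine
# similarity used below reduces to a plain dot product. Encoding all FAQ questions
# once at startup keeps each query to a single encode call plus one similarity pass.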
# ---------- Search function ----------
def semantic_search(user_query, top_k=3):
    # Embed the query and rank all FAQ questions by cosine similarity.
    query_embedding = model.encode(user_query, convert_to_tensor=True, normalize_embeddings=True)
    scores = util.cos_sim(query_embedding, question_embeddings)[0]
    # The slider value may arrive as a float, so cast before calling topk.
    top_k_idx = scores.topk(k=int(top_k)).indices.cpu().numpy()
    results = []
    for idx in top_k_idx:
        results.append(
            {
                "FAQ Question": questions[idx],
                "FAQ Answer": answers[idx],
                "Similarity": f"{scores[idx]:.3f}",
            }
        )
    # Return a DataFrame so the gr.Dataframe output renders the columns in order.
    return pd.DataFrame(results)
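# Illustrative local check (assumes the embeddings above have been built):
#   print(semantic_search("How do I protect my hair from heat styling?", top_k=2))
# which prints a 2-row DataFrame with the columns "FAQ Question", "FAQ Answer", "Similarity".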
# ---------- Gradio UI ----------
with gr.Blocks(title="MiniLM Semantic FAQ Search") as demo:
    gr.Markdown(
        """
        # Semantic FAQ Search
        Enter a salon-related question. The model finds the closest FAQs and displays their answers.
        """)
    with gr.Row():
        query_box = gr.Textbox(
            label="Ask a question",
            placeholder="e.g. Which spray protects hair from heat?"
        )
        topk_slider = gr.Slider(
            1, 5, value=3, step=1, label="Number of results"
        )
    search_btn = gr.Button("Search")
    out = gr.Dataframe(headers=["FAQ Question", "FAQ Answer", "Similarity"], visible=True, wrap=True)
    search_btn.click(semantic_search, [query_box, topk_slider], out)
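
# Binding to 0.0.0.0 exposes the app on all network interfaces (needed when running
# inside a container, e.g. on Hugging Face Spaces); show_error surfaces exceptions in the UI.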
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", show_error=True)