import gradio as gr
from transformers import pipeline
from sentence_transformers import SentenceTransformer, util
import torch
# Load the sentence-embedding model used for semantic evaluation
embedder = SentenceTransformer('all-MiniLM-L6-v2')
# Load the language model via a text-generation pipeline
# Can be swapped for LLaMA-2 or any other HF-compatible model
generator = pipeline(
"text-generation",
model="mistralai/Mistral-7B-Instruct-v0.1",
tokenizer="mistralai/Mistral-7B-Instruct-v0.1",
device=0 if torch.cuda.is_available() else -1,
max_new_tokens=150,
do_sample=True,
temperature=0.7
)
# Function to evaluate understanding (similarity between the prompt and the response)
def evaluate_understanding(prompt, response):
    prompt_emb = embedder.encode(prompt, convert_to_tensor=True)
    response_emb = embedder.encode(response, convert_to_tensor=True)
    similarity = util.cos_sim(prompt_emb, response_emb).item()
    status = "โœ… Well understood" if similarity > 0.5 else "โŒ Not well understood"
    return status + f" (similarity score: {similarity:.2f})"
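
# Illustrative usage sketch (not called at startup; the example prompt/response
# pair and the 0.5 threshold interpretation are assumptions, and the exact
# score will vary with the embedding model version):
#   evaluate_understanding("What is the capital of France?",
#                          "The capital of France is Paris.")
#   -> returns a status string such as "โœ… Well understood (similarity score: 0.xx)"
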
# Main application function
def generate_and_evaluate(prompt):
    # Return only the newly generated text so the evaluation compares the prompt
    # against the model's answer rather than against the echoed prompt itself
    result = generator(prompt, return_full_text=False)[0]["generated_text"]
    evaluation = evaluate_understanding(prompt, result)
    return result, evaluation
# Gradio interface
iface = gr.Interface(
    fn=generate_and_evaluate,
    inputs=gr.Textbox(label="๐Ÿ“ Enter an instruction or question (Prompt)"),
    outputs=[
        gr.Textbox(label="๐Ÿค– Model response"),
        gr.Textbox(label="๐Ÿ“Š Understanding evaluation"),
    ],
    title="๐Ÿง  LLM Prompt Understanding Evaluator",
    description="Enter a question or instruction; a response will be generated and the model's understanding of it will be evaluated automatically."
)
iface.launch()