mariamelia committed on
Commit
14449aa
·
verified ·
1 Parent(s): 258e1d4

update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -44
app.py CHANGED
@@ -1,48 +1,13 @@
1
import gradio as gr
from transformers import pipeline
from sentence_transformers import SentenceTransformer, util
import torch

# Sentence-embedding model used to score semantic similarity between a
# prompt and the generated reply.
embedder = SentenceTransformer('all-MiniLM-L6-v2')

# Text-generation pipeline; the checkpoint id can be swapped for LLaMA-2
# or any other HF-compatible model.
_MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.1"
generator = pipeline(
    "text-generation",
    model=_MODEL_ID,
    tokenizer=_MODEL_ID,
    device=0 if torch.cuda.is_available() else -1,  # GPU 0 when available, else CPU
    max_new_tokens=150,
    do_sample=True,
    temperature=0.7,
)
20
 
21
# Understanding check: cosine similarity between prompt and response embeddings.
def evaluate_understanding(prompt, response, threshold=0.5):
    """Rate how well *response* semantically matches *prompt*.

    Args:
        prompt: The user's instruction or question.
        response: The model-generated reply.
        threshold: Cosine-similarity cutoff above which the response counts
            as "understood". Defaults to 0.5, preserving the original
            hard-coded behavior; now tunable per call.

    Returns:
        A verdict string that embeds the similarity score.
    """
    prompt_emb = embedder.encode(prompt, convert_to_tensor=True)
    response_emb = embedder.encode(response, convert_to_tensor=True)

    similarity = util.cos_sim(prompt_emb, response_emb).item()
    # NOTE(review): the literals below look mojibake-encoded in this scrape
    # (likely ✅/❌ + Arabic in the real file); reproduced byte-for-byte so
    # runtime output is unchanged — confirm against the original app.py.
    status = "โœ… ู…ูู‡ูˆู… ุฌูŠุฏู‹ุง" if similarity > threshold else "โŒ ู„ู… ูŠููู‡ู… ุฌูŠุฏู‹ุง"
    return status + f" (ุฏุฑุฌุฉ ุงู„ุชุดุงุจู‡: {similarity:.2f})"
29
-
30
# Main app callback: generate a reply, then score how well it fits the prompt.
def generate_and_evaluate(prompt):
    """Return a pair (generated reply, understanding verdict) for *prompt*."""
    generated = generator(prompt)
    reply = generated[0]["generated_text"]
    verdict = evaluate_understanding(prompt, reply)
    return reply, verdict
35
-
36
# Gradio UI wiring: one prompt textbox in, reply + understanding verdict out.
# NOTE(review): the non-ASCII label/title text appears mojibake-encoded in
# this scrape; reproduced byte-for-byte to keep the rendered UI unchanged.
iface = gr.Interface(
    fn=generate_and_evaluate,
    inputs=gr.Textbox(label="๐Ÿ“ ุฃุฏุฎู„ ุชุนู„ูŠู…ุงุช ุฃูˆ ุณุคุงู„ (Prompt)"),
    outputs=[
        gr.Textbox(label="๐Ÿค– ุฑุฏ ุงู„ู†ู…ูˆุฐุฌ"),
        gr.Textbox(label="๐Ÿ“Š ุชู‚ูŠูŠู… ุงู„ูู‡ู…"),
    ],
    title="๐Ÿง  LLM Prompt Understanding Evaluator",
    description="ุฃุฏุฎู„ ุณุคุงู„ุงู‹ ุฃูˆ ุชุนู„ูŠู…ุงุชุŒ ูˆุณูŠุชู… ุชูˆู„ูŠุฏ ุงู„ุฑุฏ ูˆุชู‚ูŠูŠู… ู…ุฏู‰ ูู‡ู… ุงู„ู†ู…ูˆุฐุฌ ู„ู‡ ุชู„ู‚ุงุฆูŠู‹ุง."
)

iface.launch()
 
1
from huggingface_hub import login
import os

from transformers import pipeline

# Log in to the Hugging Face Hub. The original code indexed os.environ
# directly, which raises a bare KeyError when the secret is missing; fail
# fast with an actionable message instead.
_token = os.environ.get("HUGGINGFACE_HUB_TOKEN")
if not _token:
    raise RuntimeError(
        "HUGGINGFACE_HUB_TOKEN environment variable is not set; "
        "configure it (e.g. as a Space secret) so the gated Mistral "
        "model can be downloaded."
    )
login(token=_token)

# Load the instruction-tuned generation model.
generator = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.1")

# Example usage (prompt string reproduced byte-for-byte from the source).
output = generator("ู…ุง ู‡ูˆ ุงู„ุฐูƒุงุก ุงู„ุงุตุทู†ุงุนูŠุŸ", max_new_tokens=100)
print(output[0]['generated_text'])