Update app.py
app.py CHANGED

@@ -1,49 +1,50 @@
 import gradio as gr
-from transformers import AutoTokenizer,
+from transformers import AutoTokenizer, AutoModelForCausalLM
 from deep_translator import GoogleTranslator
 import torch
 
-#
-model_id = "google/gemma-
+# Choose a stable, compatible model
+model_id = "google/gemma-7b-it"
 
-# Loading
+# Load the tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_id)
-model =
+model = AutoModelForCausalLM.from_pretrained(
     model_id,
-    torch_dtype=torch.
+    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
     device_map="auto"
 )
 model.eval()
 
+# Thesis-topic generation function
 def generate_topics(field, major, keywords, audience, level):
-    # Build the prompt
-    prompt = f"""<bos>[INST]Suggest 3 academic thesis topics based on the following
+    # Build the prompt for the model
+    prompt = f"""<bos>[INST]Suggest 3 academic thesis topics based on the following:
 Field: {field}
 Specialization: {major}
 Keywords: {keywords}
-Target
+Target Audience: {audience}
 Level: {level}[/INST]"""
 
-    #
+    # Tokenize and send to the model
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     with torch.no_grad():
         outputs = model.generate(**inputs, max_new_tokens=256)
     english_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-    # Translate to Persian
+    # Translate the output to Persian
    translated_output = GoogleTranslator(source='en', target='fa').translate(english_output)
     translated_output_html = translated_output.strip().replace("\n", "<br>")
 
-    # Right-to-left HTML
+    # Build right-to-left HTML output for Gradio
     html_output = (
-        "<div dir='rtl' style='text-align: right; font-family: Tahoma, sans-serif; font-size: 16px; "
-        f"
+        "<div dir='rtl' style='text-align: right; font-family: Tahoma, sans-serif; font-size: 16px; line-height: 1.8;'>"
+        f"{translated_output_html}"
         "<br><br>📢 برای مشاوره و راهنمایی تخصصی با گروه مشاوره کاسپین تماس بگیرید:<br>"
         "<strong>021-88252497</strong></div>"
     )
     return html_output
 
-# Gradio interface
+# Gradio user interface
 iface = gr.Interface(
     fn=generate_topics,
     inputs=[
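The diff is cut off at the `inputs=[` line, so the rest of the `gr.Interface(...)` call is not visible in this commit. As a minimal sketch only, the wiring might look like the following, assuming five Textbox inputs matching the `generate_topics` parameters and an HTML output; every label, component choice, and the title below are illustrative assumptions, not taken from the file.

# Hypothetical completion of the truncated gr.Interface(...) call.
# Assumes generate_topics and gradio (gr) are defined/imported as in app.py above;
# labels, components, and title are assumptions for illustration only.
iface = gr.Interface(
    fn=generate_topics,
    inputs=[
        gr.Textbox(label="Field"),            # e.g. "Computer Science"
        gr.Textbox(label="Specialization"),   # e.g. "Natural Language Processing"
        gr.Textbox(label="Keywords"),         # comma-separated keywords
        gr.Textbox(label="Target Audience"),
        gr.Textbox(label="Level"),            # e.g. "Master's" or "PhD"
    ],
    outputs=gr.HTML(),                        # generate_topics returns an HTML string
    title="Thesis Topic Generator",
)

if __name__ == "__main__":
    iface.launch()

On a Hugging Face Space, the launch() call is what serves the app; the actual component list in the committed app.py may differ from this sketch.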