diginoron committed (verified)
Commit e63032a · 1 Parent(s): 3f2c29f

Update app.py

Files changed (1): app.py (+8 -8)
app.py CHANGED
@@ -1,31 +1,31 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, GemmaForCausalLM
 from deep_translator import GoogleTranslator
 import torch
 
 # Model settings
-model_id = "google/gemma-3-4b-it"  # or "google/gemma-2-7b-it", depending on your needs
+model_id = "google/gemma-3-4b-it"
 
 # Load the model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(
+model = GemmaForCausalLM.from_pretrained(
     model_id,
-    torch_dtype=torch.bfloat16,
+    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
     device_map="auto"
 )
 model.eval()
 
 def generate_topics(field, major, keywords, audience, level):
     # Build the prompt
-    prompt = f"""[INST]Suggest 3 academic thesis topics based on the following information:
+    prompt = f"""<bos>[INST]Suggest 3 academic thesis topics based on the following information:
 Field: {field}
 Specialization: {major}
 Keywords: {keywords}
 Target audience: {audience}
-Level: {level}[/INST]
-"""
+Level: {level}[/INST]"""
+
     # Generate the output
-    inputs = tokenizer(prompt, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     with torch.no_grad():
         outputs = model.generate(**inputs, max_new_tokens=256)
     english_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
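
Note on the new loading path: GemmaForCausalLM in transformers targets the original Gemma architecture, so a google/gemma-3-4b-it checkpoint may not load through it (recent transformers releases ship dedicated Gemma 3 classes). Likewise, the [INST]/[/INST] tags are the Llama/Mistral prompt convention; Gemma's instruction-tuned checkpoints use <start_of_turn> turns, which tokenizer.apply_chat_template emits for you. Below is a minimal sketch of that alternative, not the committed code: it assumes a transformers release with Gemma 3 support and uses AutoModelForCausalLM to resolve the matching class.

# A minimal sketch, not the committed code. Assumes a transformers
# release with Gemma 3 support; AutoModelForCausalLM picks the class
# that matches the checkpoint config.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "google/gemma-3-4b-it"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
)
model.eval()

def generate_topics(field, major, keywords, audience, level):
    # A single user turn; apply_chat_template wraps it in Gemma's own
    # <start_of_turn>user ... <end_of_turn> format instead of [INST] tags.
    messages = [{
        "role": "user",
        "content": (
            "Suggest 3 academic thesis topics based on the following information:\n"
            f"Field: {field}\nSpecialization: {major}\nKeywords: {keywords}\n"
            f"Target audience: {audience}\nLevel: {level}"
        ),
    }]
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,  # open the model's turn for generation
        return_tensors="pt",
    ).to(model.device)
    with torch.no_grad():
        outputs = model.generate(input_ids, max_new_tokens=256)
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)

Moving inputs to model.device, as the commit itself does, also keeps the call correct when device_map="auto" places the weights somewhere other than a hard-coded "cuda".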