Menna-Ahmed committed
Commit b8ac201 · verified · 1 parent: 96aea68

Update app.py

Files changed (1):
  1. app.py (+23, -25)
app.py CHANGED
@@ -46,8 +46,6 @@ def translate_answer_medical(answer_en):
     else:
         return translate_en_to_ar(answer_en)
 
-# الدالة الرئيسية
-
 # ✅ Arabic font helper
 def get_font(size=22):
     try:
@@ -92,30 +90,30 @@ def generate_report_image(image, question_ar, question_en, answer_ar, answer_en)
     background.save(file_name)
     return file_name
 
-
+# ✅ Main VQA function
 def vqa_multilingual(image, question):
-    if not image or not question.strip():
-        return "يرجى رفع صورة وكتابة سؤال.", "", "", "", None
-
-    is_arabic = any('\u0600' <= c <= '\u06FF' for c in question)
-    question_ar = question.strip() if is_arabic else translate_en_to_ar(question)
-    question_en = translate_ar_to_en(question) if is_arabic else question.strip()
-
-    inputs = processor(image, question_en, return_tensors="pt")
-    with torch.no_grad():
-        output = blip_model.generate(**inputs)
-    answer_en = processor.decode(output[0], skip_special_tokens=True).strip()
-    answer_ar = translate_answer_medical(answer_en)
-
-    report_image_path = generate_report_image(image, question_ar, question_en, answer_ar, answer_en)
-
-    return (
-        question_ar,
-        question_en,
-        answer_ar,
-        answer_en,
-        report_image_path
-    )
+    if not image or not question.strip():
+        return "يرجى رفع صورة وكتابة سؤال.", "", "", "", None
+
+    is_arabic = any('\u0600' <= c <= '\u06FF' for c in question)
+    question_ar = question.strip() if is_arabic else translate_en_to_ar(question)
+    question_en = translate_ar_to_en(question) if is_arabic else question.strip()
+
+    inputs = processor(image, question_en, return_tensors="pt")
+    with torch.no_grad():
+        output = blip_model.generate(**inputs)
+    answer_en = processor.decode(output[0], skip_special_tokens=True).strip()
+    answer_ar = translate_answer_medical(answer_en)
+
+    report_image_path = generate_report_image(image, question_ar, question_en, answer_ar, answer_en)
+
+    return (
+        question_ar,
+        question_en,
+        answer_ar,
+        answer_en,
+        report_image_path
+    )
 
 # واجهة Gradio
 gr.Interface(
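This commit drops the Arabic placeholder comment `# الدالة الرئيسية` ("the main function") in favour of the English `# ✅ Main VQA function`, and removes and re-adds the body of `vqa_multilingual`; the visible text is identical on both sides of the hunk, so the change appears to be limited to whitespace. The function relies on a `processor` and `blip_model` defined outside this hunk. As a minimal sketch only, assuming the standard BLIP VQA classes from transformers and the `Salesforce/blip-vqa-base` checkpoint (the checkpoint actually used by this Space is not visible in the diff), the inference path the function wraps looks like this:

```python
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForQuestionAnswering

# Assumption: app.py builds `processor` and `blip_model` roughly like this;
# the checkpoint name is illustrative, not taken from the commit.
processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
blip_model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
blip_model.eval()

# Same inference pattern as the re-added vqa_multilingual body.
image = Image.open("chest_xray.png").convert("RGB")  # hypothetical input image
inputs = processor(image, "is there a fracture?", return_tensors="pt")
with torch.no_grad():
    output = blip_model.generate(**inputs)
answer_en = processor.decode(output[0], skip_special_tokens=True).strip()
print(answer_en)
```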
 
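The hunk ends right at `gr.Interface(` (under the comment `# واجهة Gradio`, "Gradio interface"), so the actual interface arguments are not part of this diff. Purely as a hypothetical sketch, a function with this signature (an image and a question in, five values out, the last being the saved report image path) is typically wired like this; every component choice and label below is an assumption, not taken from app.py:

```python
import gradio as gr

# Hypothetical wiring only: components and labels are assumptions matching
# vqa_multilingual's two inputs and five return values.
demo = gr.Interface(
    fn=vqa_multilingual,
    inputs=[
        gr.Image(type="pil", label="Medical image"),
        gr.Textbox(label="Question (Arabic or English)"),
    ],
    outputs=[
        gr.Textbox(label="Question (AR)"),
        gr.Textbox(label="Question (EN)"),
        gr.Textbox(label="Answer (AR)"),
        gr.Textbox(label="Answer (EN)"),
        gr.Image(label="Report image"),  # filepath returned by generate_report_image
    ],
)
demo.launch()
```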