pascal-maker committed on
Commit
72e2729
·
verified ·
1 Parent(s): ca8856a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -100
app.py CHANGED
@@ -41,8 +41,9 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStream
41
  # =============================================================================
42
  # SAM-2 Alias Patch & Installer
43
  # =============================================================================
 
44
  try:
45
- import sam_2, importlib
46
  sys.modules['sam2'] = sam_2
47
  for sub in ['build_sam','automatic_mask_generator','modeling.sam2_base']:
48
  sys.modules[f'sam2.{sub}'] = importlib.import_module(f'sam_2.{sub}')
@@ -115,6 +116,8 @@ class MedicalVLMAgent:
115
  "Disclaimer: I am not a licensed medical professional."
116
  )
117
  def run(self, text, image=None):
 
 
118
  msgs = [{"role":"system","content":[{"type":"text","text":self.sys_prompt}]}]
119
  user_cont = []
120
  if image:
@@ -173,127 +176,71 @@ try:
173
  chex_model = AutoModelForCausalLM.from_pretrained("StanfordAIMI/CheXagent-2-3b", device_map='auto', trust_remote_code=True)
174
  if torch.cuda.is_available(): chex_model = chex_model.half()
175
  chex_model.eval(); CHEX_AVAILABLE=True
176
- except Exception as e:
177
- print(f"CheXagent loading error: {e}")
178
  CHEX_AVAILABLE=False
179
 
180
  @torch.no_grad()
181
  def report_generation(im1, im2):
182
- if not CHEX_AVAILABLE:
183
- return "CheXagent is not available. Please check the logs for loading errors."
184
- # Simplified implementation - actual CheXagent usage would be more complex
185
- return "Report generation feature is in development. CheXagent model loaded successfully."
186
 
187
  @torch.no_grad()
188
  def phrase_grounding(image, prompt):
189
- if not CHEX_AVAILABLE:
190
- return "CheXagent is not available", None
191
- if image is None:
192
- return "Please upload an image", None
193
- # Simple mock grounding - actual implementation would use CheXagent's grounding capabilities
194
- w,h=image.size;
195
- draw=ImageDraw.Draw(image)
196
  draw.rectangle([(w*0.25,h*0.25),(w*0.75,h*0.75)], outline='red', width=3)
197
- return f"Mock grounding for: {prompt}", image
198
 
199
  # =============================================================================
200
  # Gradio UI
201
  # =============================================================================
202
-
203
  def create_ui():
 
204
  try:
205
  m, p, d = load_qwen_model_and_processor()
206
- med = MedicalVLMAgent(m, p, d)
207
- QW = True
208
- print("Qwen model loaded successfully")
209
- except Exception as e:
210
- print(f"Failed to load Qwen model: {e}")
211
- QW = False
212
  med = None
213
-
214
- # Define a safe medical function that handles None case
215
- def safe_medical_qa(text, image):
216
- if med is None:
217
- return "Medical Q&A model is not available. Please check the logs for loading errors."
218
- try:
219
- return med.run(text, image)
220
- except Exception as e:
221
- return f"Error processing request: {str(e)}"
222
-
223
- with gr.Blocks(title="Medical AI Assistant") as demo:
224
  gr.Markdown("# Medical AI Assistant")
225
- gr.Markdown("**Disclaimer**: This is for educational purposes only. Always consult healthcare professionals for medical advice.")
226
- gr.Markdown(f"**Model Status**: Qwen: {'βœ…' if QW else '❌'} | SAM-2: {'βœ…' if _mask_generator else '❌'} | CheXagent: {'βœ…' if CHEX_AVAILABLE else '❌'}")
227
-
228
  with gr.Tab("Medical Q&A"):
229
- gr.Markdown("Ask questions about medical images or general medical topics.")
230
- with gr.Row():
231
- with gr.Column():
232
- txt = gr.Textbox(
233
- label="Your Question",
234
- placeholder="Ask about symptoms, conditions, or describe what you see in an image...",
235
- lines=3
236
- )
237
- img = gr.Image(type='pil', label="Medical Image (optional)")
238
- ask_btn = gr.Button("Ask Question", variant="primary")
239
- with gr.Column():
240
- out = gr.Textbox(label="AI Response", lines=10)
241
-
242
- ask_btn.click(safe_medical_qa, [txt, img], out)
243
-
244
- with gr.Tab("Image Segmentation"):
245
- gr.Markdown("Automatically segment medical images using SAM-2 or fallback method.")
246
- with gr.Row():
247
- with gr.Column():
248
- seg = gr.Image(type='pil', label="Input Medical Image")
249
- seg_btn = gr.Button("Segment Image", variant="primary")
250
- with gr.Column():
251
- so = gr.Image(label="Segmented Output")
252
- ss = gr.Textbox(label="Segmentation Status")
253
-
254
  fn = segmentation_interface if _mask_generator else fallback_segmentation
255
- seg_btn.click(fn, seg, [so, ss])
256
-
257
  with gr.Tab("CheXagent Report"):
258
- gr.Markdown("Generate structured radiology reports using CheXagent.")
259
- with gr.Row():
260
- with gr.Column():
261
- c1 = gr.Image(type='pil', label="Chest X-ray Image")
262
- c2 = gr.Image(type='pil', label="Comparison Image (optional)")
263
- report_btn = gr.Button("Generate Report", variant="primary")
264
- with gr.Column():
265
- rout = gr.Markdown(label="Generated Report")
266
-
267
- report_btn.click(
268
- lambda x, y: report_generation(x, y),
269
- [c1, c2],
270
- rout
271
- )
272
-
273
  with gr.Tab("CheXagent Grounding"):
274
- gr.Markdown("Ground specific phrases or findings in medical images.")
275
- with gr.Row():
276
- with gr.Column():
277
- gi = gr.Image(type='pil', label="Medical Image")
278
- gp = gr.Textbox(
279
- label="Phrase to Ground",
280
- placeholder="e.g., 'pneumonia', 'heart shadow', 'fracture'..."
281
- )
282
- ground_btn = gr.Button("Ground Phrase", variant="primary")
283
- with gr.Column():
284
- gout = gr.Textbox(label="Grounding Result")
285
- goimg = gr.Image(label="Image with Grounding")
286
-
287
- ground_btn.click(phrase_grounding, [gi, gp], [gout, goimg])
288
-
289
  return demo
290
 
291
  if __name__ == "__main__":
292
- print("Starting Medical AI Assistant...")
293
  ui = create_ui()
294
- ui.launch(
295
- server_name='0.0.0.0',
296
- server_port=7860,
297
- share=True,
298
- show_error=True
299
- )
 
41
  # =============================================================================
42
  # SAM-2 Alias Patch & Installer
43
  # =============================================================================
44
+ import importlib
45
  try:
46
+ import sam_2
47
  sys.modules['sam2'] = sam_2
48
  for sub in ['build_sam','automatic_mask_generator','modeling.sam2_base']:
49
  sys.modules[f'sam2.{sub}'] = importlib.import_module(f'sam_2.{sub}')
 
116
  "Disclaimer: I am not a licensed medical professional."
117
  )
118
  def run(self, text, image=None):
119
+ if self.model is None:
120
+ return "Qwen-VLM model not loaded"
121
  msgs = [{"role":"system","content":[{"type":"text","text":self.sys_prompt}]}]
122
  user_cont = []
123
  if image:
 
176
  chex_model = AutoModelForCausalLM.from_pretrained("StanfordAIMI/CheXagent-2-3b", device_map='auto', trust_remote_code=True)
177
  if torch.cuda.is_available(): chex_model = chex_model.half()
178
  chex_model.eval(); CHEX_AVAILABLE=True
179
+ except Exception:
 
180
  CHEX_AVAILABLE=False
181
 
182
  @torch.no_grad()
183
  def report_generation(im1, im2):
184
+ if not CHEX_AVAILABLE: yield "CheXagent unavailable"; return
185
+ streamer = TextIteratorStreamer(chex_tok, skip_prompt=True)
186
+ yield "Report streaming not fully implemented"
 
187
 
188
  @torch.no_grad()
189
  def phrase_grounding(image, prompt):
190
+ if not CHEX_AVAILABLE: return "CheXagent unavailable", None
191
+ w,h=image.size; draw=ImageDraw.Draw(image)
 
 
 
 
 
192
  draw.rectangle([(w*0.25,h*0.25),(w*0.75,h*0.75)], outline='red', width=3)
193
+ return prompt, image
194
 
195
  # =============================================================================
196
  # Gradio UI
197
  # =============================================================================
 
198
  def create_ui():
199
+ # Load Qwen agent
200
  try:
201
  m, p, d = load_qwen_model_and_processor()
202
+ med = MedicalVLMAgent(m,p,d)
203
+ qwen_ok = True
204
+ except Exception:
 
 
 
205
  med = None
206
+ qwen_ok = False
207
+
208
+ with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
209
  gr.Markdown("# Medical AI Assistant")
210
+ gr.Markdown(f"- Qwen: {'βœ…' if qwen_ok else '❌'} - SAM-2: {'βœ…' if _mask_generator else '❌'} - CheX: {'βœ…' if CHEX_AVAILABLE else '❌'}")
 
 
211
  with gr.Tab("Medical Q&A"):
212
+ if qwen_ok and med is not None:
213
+ txt = gr.Textbox(label="Question / description", lines=3)
214
+ img = gr.Image(label="Optional image", type='pil')
215
+ out = gr.Textbox(label="Answer")
216
+ gr.Button("Ask").click(med.run, inputs=[txt, img], outputs=out)
217
+ else:
218
+ gr.Markdown("❌ Medical Q&A is not available.")
219
+ with gr.Tab("Segmentation"):
220
+ seg = gr.Image(label="Upload image", type='pil')
221
+ so = gr.Image(label="Result")
222
+ ss = gr.Textbox(label="Status", interactive=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
223
  fn = segmentation_interface if _mask_generator else fallback_segmentation
224
+ gr.Button("Segment").click(fn, inputs=[seg], outputs=[so, ss])
 
225
  with gr.Tab("CheXagent Report"):
226
+ c1 = gr.Image(type='pil', label="Image 1")
227
+ c2 = gr.Image(type='pil', label="Image 2")
228
+ rout = gr.Markdown()
229
+ if CHEX_AVAILABLE:
230
+ gr.Interface(fn=report_generation, inputs=[c1, c2], outputs=rout, live=True).render()
231
+ else:
232
+ gr.Markdown("❌ CheXagent report not available.")
 
 
 
 
 
 
 
 
233
  with gr.Tab("CheXagent Grounding"):
234
+ gi = gr.Image(type='pil', label="Image")
235
+ gp = gr.Textbox(label="Prompt")
236
+ gout = gr.Textbox(label="Response")
237
+ goimg = gr.Image(label="Output Image")
238
+ if CHEX_AVAILABLE:
239
+ gr.Interface(fn=phrase_grounding, inputs=[gi, gp], outputs=[gout, goimg]).render()
240
+ else:
241
+ gr.Markdown("❌ CheXagent grounding not available.")
 
 
 
 
 
 
 
242
  return demo
243
 
244
  if __name__ == "__main__":
 
245
  ui = create_ui()
246
+ ui.launch(server_name='0.0.0.0', server_port=7860, share=True)