pascal-maker committed on
Commit
12028c3
Β·
verified Β·
1 Parent(s): 4d030cf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +103 -21
app.py CHANGED
@@ -1,4 +1,4 @@
1
- ##!/usr/bin/env python
2
  # -*- coding: utf-8 -*-
3
  """
4
  Combined Medical-VLM, SAM-2 Automatic Masking, and CheXagent Demo
@@ -173,21 +173,28 @@ try:
173
  chex_model = AutoModelForCausalLM.from_pretrained("StanfordAIMI/CheXagent-2-3b", device_map='auto', trust_remote_code=True)
174
  if torch.cuda.is_available(): chex_model = chex_model.half()
175
  chex_model.eval(); CHEX_AVAILABLE=True
176
- except Exception:
 
177
  CHEX_AVAILABLE=False
178
 
179
  @torch.no_grad()
180
  def report_generation(im1, im2):
181
- if not CHEX_AVAILABLE: yield "CheXagent unavailable"; return
182
- streamer = TextIteratorStreamer(chex_tok, skip_prompt=True)
183
- yield "Report streaming not fully implemented"
 
184
 
185
  @torch.no_grad()
186
  def phrase_grounding(image, prompt):
187
- if not CHEX_AVAILABLE: return "CheXagent unavailable", None
188
- w,h=image.size; draw=ImageDraw.Draw(image)
 
 
 
 
 
189
  draw.rectangle([(w*0.25,h*0.25),(w*0.75,h*0.75)], outline='red', width=3)
190
- return prompt, image
191
 
192
  # =============================================================================
193
  # Gradio UI
@@ -196,23 +203,98 @@ def phrase_grounding(image, prompt):
196
  def create_ui():
197
  try:
198
  m, p, d = load_qwen_model_and_processor()
199
- med = MedicalVLMAgent(m,p,d); QW=True
200
- except:
201
- QW=False; med=None
202
- with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
203
  gr.Markdown("# Medical AI Assistant")
204
- gr.Markdown(f"- Qwen: {'βœ…' if QW else '❌'} - SAM-2: {'βœ…' if _mask_generator else '❌'} - CheX: {'βœ…' if CHEX_AVAILABLE else '❌'}")
 
 
205
  with gr.Tab("Medical Q&A"):
206
- txt=gr.Textbox(); img=gr.Image(type='pil'); out=gr.Textbox(); gr.Button("Ask").click(med.run,[txt,img],out)
207
- with gr.Tab("Segmentation"):
208
- seg=gr.Image(type='pil'); so=gr.Image(); ss=gr.Textbox(); fn=segmentation_interface if _mask_generator else fallback_segmentation; gr.Button("Segment").click(fn,seg,[so,ss])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
  with gr.Tab("CheXagent Report"):
210
- c1=gr.Image(type='pil');c2=gr.Image(type='pil'); rout=gr.Markdown(); gr.Interface(report_generation,[c1,c2],rout,live=True).render()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  with gr.Tab("CheXagent Grounding"):
212
- gi=gr.Image(type='pil'); gp=gr.Textbox(); gout=gr.Textbox(); goimg=gr.Image(); gr.Interface(phrase_grounding,[gi,gp],[gout,goimg]).render()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
213
  return demo
214
 
215
  if __name__ == "__main__":
216
- ui=create_ui(); ui.launch(server_name='0.0.0.0',server_port=7860,share=True)
217
-
218
-
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
  # -*- coding: utf-8 -*-
3
  """
4
  Combined Medical-VLM, SAM-2 Automatic Masking, and CheXagent Demo
 
173
  chex_model = AutoModelForCausalLM.from_pretrained("StanfordAIMI/CheXagent-2-3b", device_map='auto', trust_remote_code=True)
174
  if torch.cuda.is_available(): chex_model = chex_model.half()
175
  chex_model.eval(); CHEX_AVAILABLE=True
176
+ except Exception as e:
177
+ print(f"CheXagent loading error: {e}")
178
  CHEX_AVAILABLE=False
179
 
180
@torch.no_grad()
def report_generation(im1, im2):
    """Placeholder CheXagent report handler for the Gradio report tab.

    Both image arguments are currently unused; a status string is
    returned instead of a generated report.
    """
    if CHEX_AVAILABLE:
        # Simplified implementation - actual CheXagent usage would be more complex
        return "Report generation feature is in development. CheXagent model loaded successfully."
    return "CheXagent is not available. Please check the logs for loading errors."
186
 
187
@torch.no_grad()
def phrase_grounding(image, prompt):
    """Mock phrase grounding: annotate a fixed centered box on the image.

    Returns a (message, image) pair; the image is drawn on in place.
    Returns (error message, None) when CheXagent or the image is missing.
    """
    if not CHEX_AVAILABLE:
        return "CheXagent is not available", None
    if image is None:
        return "Please upload an image", None
    # Simple mock grounding - actual implementation would use CheXagent's grounding capabilities
    img_w, img_h = image.size
    drawer = ImageDraw.Draw(image)
    box = [(img_w * 0.25, img_h * 0.25), (img_w * 0.75, img_h * 0.75)]
    drawer.rectangle(box, outline='red', width=3)
    return f"Mock grounding for: {prompt}", image
198
 
199
  # =============================================================================
200
  # Gradio UI
 
203
def create_ui():
    """Build and return the Gradio Blocks demo for the medical assistant.

    Loads the Qwen VLM (best-effort), then wires four tabs — Q&A,
    segmentation, CheXagent report, and phrase grounding — to their
    handlers, degrading gracefully when a model failed to load.
    """
    try:
        model, processor, device = load_qwen_model_and_processor()
        agent = MedicalVLMAgent(model, processor, device)
        qwen_ok = True
        print("Qwen model loaded successfully")
    except Exception as e:
        print(f"Failed to load Qwen model: {e}")
        qwen_ok = False
        agent = None

    # Guard wrapper so the Q&A tab never dereferences a missing agent.
    def safe_medical_qa(text, image):
        if agent is None:
            return "Medical Q&A model is not available. Please check the logs for loading errors."
        try:
            return agent.run(text, image)
        except Exception as e:
            return f"Error processing request: {str(e)}"

    with gr.Blocks(title="Medical AI Assistant") as demo:
        gr.Markdown("# Medical AI Assistant")
        gr.Markdown("**Disclaimer**: This is for educational purposes only. Always consult healthcare professionals for medical advice.")
        gr.Markdown(f"**Model Status**: Qwen: {'βœ…' if qwen_ok else '❌'} | SAM-2: {'βœ…' if _mask_generator else '❌'} | CheXagent: {'βœ…' if CHEX_AVAILABLE else '❌'}")

        with gr.Tab("Medical Q&A"):
            gr.Markdown("Ask questions about medical images or general medical topics.")
            with gr.Row():
                with gr.Column():
                    question_box = gr.Textbox(
                        label="Your Question",
                        placeholder="Ask about symptoms, conditions, or describe what you see in an image...",
                        lines=3,
                    )
                    question_image = gr.Image(type='pil', label="Medical Image (optional)")
                    ask_button = gr.Button("Ask Question", variant="primary")
                with gr.Column():
                    answer_box = gr.Textbox(label="AI Response", lines=10)

            ask_button.click(safe_medical_qa, [question_box, question_image], answer_box)

        with gr.Tab("Image Segmentation"):
            gr.Markdown("Automatically segment medical images using SAM-2 or fallback method.")
            with gr.Row():
                with gr.Column():
                    seg_input = gr.Image(type='pil', label="Input Medical Image")
                    segment_button = gr.Button("Segment Image", variant="primary")
                with gr.Column():
                    seg_output = gr.Image(label="Segmented Output")
                    seg_status = gr.Textbox(label="Segmentation Status")

            # Prefer SAM-2 when its mask generator loaded; otherwise fall back.
            segment_fn = segmentation_interface if _mask_generator else fallback_segmentation
            segment_button.click(segment_fn, seg_input, [seg_output, seg_status])

        with gr.Tab("CheXagent Report"):
            gr.Markdown("Generate structured radiology reports using CheXagent.")
            with gr.Row():
                with gr.Column():
                    frontal_image = gr.Image(type='pil', label="Chest X-ray Image")
                    comparison_image = gr.Image(type='pil', label="Comparison Image (optional)")
                    report_button = gr.Button("Generate Report", variant="primary")
                with gr.Column():
                    report_output = gr.Markdown(label="Generated Report")

            report_button.click(report_generation, [frontal_image, comparison_image], report_output)

        with gr.Tab("CheXagent Grounding"):
            gr.Markdown("Ground specific phrases or findings in medical images.")
            with gr.Row():
                with gr.Column():
                    grounding_image = gr.Image(type='pil', label="Medical Image")
                    phrase_box = gr.Textbox(
                        label="Phrase to Ground",
                        placeholder="e.g., 'pneumonia', 'heart shadow', 'fracture'...",
                    )
                    ground_button = gr.Button("Ground Phrase", variant="primary")
                with gr.Column():
                    grounding_text = gr.Textbox(label="Grounding Result")
                    grounding_overlay = gr.Image(label="Image with Grounding")

            ground_button.click(phrase_grounding, [grounding_image, phrase_box], [grounding_text, grounding_overlay])

    return demo
290
 
291
if __name__ == "__main__":
    # Entry point: build the Gradio UI and serve it on all interfaces.
    # Fix: the original closing paren was fused with a stray "===..." banner
    # and an orphan "# Gradio UI" trailer, which made the file a syntax error.
    print("Starting Medical AI Assistant...")
    ui = create_ui()
    ui.launch(
        server_name='0.0.0.0',
        server_port=7860,
        share=True,       # expose a public Gradio share link
        show_error=True,  # surface handler exceptions in the UI
    )