Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,256 +1,454 @@
|
|
1 |
#!/usr/bin/env python
|
2 |
-
|
3 |
"""
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
Features:
|
8 |
-
- Qwen2.5-VL Instruct medical vision-language Q&A
|
9 |
-
- SAM-2 segmentation with alias patch for Hugging Face
|
10 |
-
- Simple fallback segmentation
|
11 |
-
- CheXagent structured report & visual grounding
|
12 |
-
- Automatic dependency checking & installation for SAM-2
|
13 |
-
|
14 |
-
Usage:
|
15 |
-
HF_TOKEN=<your_token> python medical_ai_app.py # if private models require auth
|
16 |
-
Requires:
|
17 |
-
torch, transformers, PIL, gradio, ultralytics, requests, opencv-python, pyyaml
|
18 |
"""
|
|
|
19 |
import os
|
20 |
import sys
|
21 |
-
import
|
22 |
-
import tempfile
|
23 |
-
import subprocess
|
24 |
-
import warnings
|
25 |
-
from threading import Thread
|
26 |
-
from pathlib import Path
|
27 |
-
|
28 |
-
# Hugging Face token (for private models)
|
29 |
-
HF_TOKEN = os.getenv("HF_TOKEN")
|
30 |
-
|
31 |
-
# Environment setup
|
32 |
-
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
|
33 |
-
warnings.filterwarnings("ignore", message=r".*upsample_bicubic2d.*")
|
34 |
-
|
35 |
-
# Third-party libs
|
36 |
-
import torch
|
37 |
import numpy as np
|
38 |
-
import
|
39 |
-
from PIL import Image, ImageDraw
|
40 |
import gradio as gr
|
41 |
-
|
42 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
|
43 |
-
import importlib
|
44 |
|
45 |
-
#
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
except ImportError:
|
54 |
-
pass
|
55 |
|
56 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
57 |
try:
|
58 |
-
|
59 |
-
|
|
|
|
|
|
|
|
|
60 |
except ImportError:
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
68 |
try:
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
return True
|
75 |
except ImportError:
|
76 |
-
|
|
|
|
|
77 |
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
|
|
|
|
|
|
|
|
84 |
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
92 |
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
99 |
|
100 |
-
def
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
124 |
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
|
|
|
|
151 |
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
if SAM2_AVAILABLE:
|
157 |
try:
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
163 |
)
|
164 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
165 |
except Exception as e:
|
166 |
-
|
167 |
-
|
168 |
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
188 |
|
189 |
-
|
190 |
-
|
191 |
-
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
196 |
|
197 |
-
|
198 |
-
|
199 |
-
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
204 |
|
205 |
-
# =============================================================================
|
206 |
-
# Gradio UI
|
207 |
-
# =============================================================================
|
208 |
def create_ui():
|
209 |
-
|
210 |
-
|
211 |
-
|
212 |
-
|
213 |
-
|
214 |
-
|
215 |
-
|
216 |
-
|
217 |
-
|
218 |
-
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
228 |
-
|
229 |
-
|
230 |
-
|
231 |
-
|
232 |
-
|
233 |
-
|
234 |
-
|
235 |
-
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
-
|
246 |
-
|
247 |
-
|
248 |
-
|
249 |
-
|
250 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
251 |
return demo
|
252 |
|
253 |
if __name__ == "__main__":
|
254 |
-
|
255 |
-
|
256 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
#!/usr/bin/env python
|
2 |
+
#!/usr/bin/env python3
|
3 |
"""
|
4 |
+
Complete Medical Image Analysis Application with Error Handling
|
5 |
+
Includes fallback mechanisms for when models fail to load
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
"""
|
7 |
+
|
8 |
import os
|
9 |
import sys
|
10 |
+
import traceback
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
import numpy as np
|
12 |
+
from PIL import Image
|
|
|
13 |
import gradio as gr
|
14 |
+
import logging
|
|
|
|
|
15 |
|
16 |
+
# Setup logging
|
17 |
+
logging.basicConfig(level=logging.INFO)
|
18 |
+
logger = logging.getLogger(__name__)
|
19 |
+
|
20 |
+
# Global variables for model availability
|
21 |
+
_mask_generator = None
|
22 |
+
_chexagent_model = None
|
23 |
+
_qwen_model = None
|
|
|
|
|
24 |
|
25 |
+
def install_missing_dependencies():
|
26 |
+
"""Install missing dependencies if possible"""
|
27 |
+
import subprocess
|
28 |
+
|
29 |
+
missing_packages = []
|
30 |
+
|
31 |
+
# Check for required packages
|
32 |
try:
|
33 |
+
import albumentations
|
34 |
+
except ImportError:
|
35 |
+
missing_packages.append('albumentations')
|
36 |
+
|
37 |
+
try:
|
38 |
+
import einops
|
39 |
except ImportError:
|
40 |
+
missing_packages.append('einops')
|
41 |
+
|
42 |
+
try:
|
43 |
+
import cv2
|
44 |
+
except ImportError:
|
45 |
+
missing_packages.append('opencv-python')
|
46 |
+
|
47 |
+
if missing_packages:
|
48 |
+
logger.info(f"Installing missing packages: {missing_packages}")
|
49 |
+
for package in missing_packages:
|
50 |
+
try:
|
51 |
+
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
|
52 |
+
logger.info(f"Successfully installed {package}")
|
53 |
+
except subprocess.CalledProcessError:
|
54 |
+
logger.warning(f"Failed to install {package}")
|
55 |
+
|
56 |
+
# Install missing dependencies at startup
|
57 |
+
install_missing_dependencies()
|
58 |
+
|
59 |
+
def check_dependencies():
|
60 |
+
"""Check if all required dependencies are available"""
|
61 |
+
deps_status = {
|
62 |
+
'torch': False,
|
63 |
+
'torchvision': False,
|
64 |
+
'transformers': False,
|
65 |
+
'albumentations': False,
|
66 |
+
'einops': False,
|
67 |
+
'cv2': False
|
68 |
+
}
|
69 |
+
|
70 |
+
for dep in deps_status:
|
71 |
try:
|
72 |
+
if dep == 'cv2':
|
73 |
+
import cv2
|
74 |
+
else:
|
75 |
+
__import__(dep)
|
76 |
+
deps_status[dep] = True
|
|
|
77 |
except ImportError:
|
78 |
+
logger.warning(f"Dependency {dep} not available")
|
79 |
+
|
80 |
+
return deps_status
|
81 |
|
82 |
+
def fallback_segmentation(image, prompt=None):
|
83 |
+
"""
|
84 |
+
Fallback segmentation function when SAM-2 is not available
|
85 |
+
Returns a simple placeholder or basic segmentation
|
86 |
+
"""
|
87 |
+
try:
|
88 |
+
import cv2
|
89 |
+
return enhanced_fallback_segmentation(image, prompt)
|
90 |
+
except ImportError:
|
91 |
+
return simple_fallback_segmentation(image, prompt)
|
92 |
|
93 |
+
def simple_fallback_segmentation(image, prompt=None):
|
94 |
+
"""Simple fallback without OpenCV"""
|
95 |
+
if isinstance(image, str):
|
96 |
+
image = Image.open(image)
|
97 |
+
elif hasattr(image, 'convert'):
|
98 |
+
image = image.convert('RGB')
|
99 |
+
else:
|
100 |
+
image = Image.fromarray(image)
|
101 |
+
|
102 |
+
# Create a simple mask as fallback
|
103 |
+
width, height = image.size
|
104 |
+
mask = np.zeros((height, width), dtype=np.uint8)
|
105 |
+
|
106 |
+
# Create a simple rectangular mask in the center
|
107 |
+
center_x, center_y = width // 2, height // 2
|
108 |
+
mask_size = min(width, height) // 4
|
109 |
+
mask[center_y-mask_size:center_y+mask_size,
|
110 |
+
center_x-mask_size:center_x+mask_size] = 255
|
111 |
+
|
112 |
+
return {
|
113 |
+
'masks': [mask],
|
114 |
+
'scores': [0.5],
|
115 |
+
'message': 'Using simple fallback segmentation - SAM-2 not available'
|
116 |
+
}
|
117 |
|
118 |
+
def enhanced_fallback_segmentation(image, prompt=None):
|
119 |
+
"""Enhanced fallback using OpenCV operations"""
|
120 |
+
import cv2
|
121 |
+
|
122 |
+
try:
|
123 |
+
# Convert image to OpenCV format
|
124 |
+
if isinstance(image, str):
|
125 |
+
cv_image = cv2.imread(image)
|
126 |
+
elif hasattr(image, 'convert'):
|
127 |
+
cv_image = cv2.cvtColor(np.array(image.convert('RGB')), cv2.COLOR_RGB2BGR)
|
128 |
+
else:
|
129 |
+
cv_image = image
|
130 |
+
|
131 |
+
# Convert to grayscale
|
132 |
+
gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
|
133 |
+
|
134 |
+
# Apply GaussianBlur to reduce noise
|
135 |
+
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
|
136 |
+
|
137 |
+
# Apply threshold to get binary image
|
138 |
+
_, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
139 |
+
|
140 |
+
# Find contours
|
141 |
+
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
142 |
+
|
143 |
+
# Create mask from largest contour
|
144 |
+
mask = np.zeros(gray.shape, dtype=np.uint8)
|
145 |
+
if contours:
|
146 |
+
largest_contour = max(contours, key=cv2.contourArea)
|
147 |
+
cv2.fillPoly(mask, [largest_contour], 255)
|
148 |
+
|
149 |
+
return {
|
150 |
+
'masks': [mask],
|
151 |
+
'scores': [0.7],
|
152 |
+
'message': 'Using OpenCV-based fallback segmentation'
|
153 |
+
}
|
154 |
+
|
155 |
+
except Exception as e:
|
156 |
+
logger.error(f"OpenCV fallback failed: {e}")
|
157 |
+
return simple_fallback_segmentation(image, prompt)
|
158 |
|
159 |
+
def load_sam2_model():
|
160 |
+
"""Load SAM-2 model with error handling"""
|
161 |
+
global _mask_generator
|
162 |
+
|
163 |
+
try:
|
164 |
+
# Check if SAM-2 directory exists
|
165 |
+
if not os.path.exists('./segment-anything-2'):
|
166 |
+
logger.warning("SAM-2 directory not found")
|
167 |
+
return False
|
168 |
+
|
169 |
+
# Try to import SAM-2
|
170 |
+
sys.path.append('./segment-anything-2')
|
171 |
+
from sam2.build_sam import build_sam2
|
172 |
+
from sam2.sam2_image_predictor import SAM2ImagePredictor
|
173 |
+
|
174 |
+
# Load the model
|
175 |
+
checkpoint = "./segment-anything-2/checkpoints/sam2_hiera_large.pt"
|
176 |
+
model_cfg = "sam2_hiera_l.yaml"
|
177 |
+
|
178 |
+
if not os.path.exists(checkpoint):
|
179 |
+
logger.warning(f"SAM-2 checkpoint not found: {checkpoint}")
|
180 |
+
return False
|
181 |
+
|
182 |
+
sam2_model = build_sam2(model_cfg, checkpoint, device="cpu")
|
183 |
+
_mask_generator = SAM2ImagePredictor(sam2_model)
|
184 |
+
|
185 |
+
logger.info("SAM-2 model loaded successfully")
|
186 |
+
return True
|
187 |
+
|
188 |
+
except Exception as e:
|
189 |
+
logger.error(f"Failed to load SAM-2: {e}")
|
190 |
+
return False
|
191 |
|
192 |
+
def load_chexagent_model():
|
193 |
+
"""Load CheXagent model with error handling"""
|
194 |
+
global _chexagent_model
|
195 |
+
|
196 |
+
try:
|
197 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
198 |
+
|
199 |
+
model_name = "StanfordAIMI/CheXagent-2-3b"
|
200 |
+
|
201 |
+
# Check if required packages are available
|
202 |
+
try:
|
203 |
+
import albumentations
|
204 |
+
import einops
|
205 |
+
except ImportError as e:
|
206 |
+
logger.error(f"Missing dependencies for CheXagent: {e}")
|
207 |
+
return False
|
208 |
+
|
209 |
+
_chexagent_model = {
|
210 |
+
'tokenizer': AutoTokenizer.from_pretrained(model_name),
|
211 |
+
'model': AutoModelForCausalLM.from_pretrained(model_name, torch_dtype='auto')
|
212 |
+
}
|
213 |
+
|
214 |
+
logger.info("CheXagent model loaded successfully")
|
215 |
+
return True
|
216 |
+
|
217 |
+
except Exception as e:
|
218 |
+
logger.error(f"Failed to load CheXagent: {e}")
|
219 |
+
return False
|
220 |
|
221 |
+
def load_qwen_model():
|
222 |
+
"""Load Qwen model with error handling"""
|
223 |
+
global _qwen_model
|
224 |
+
|
|
|
225 |
try:
|
226 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoProcessor
|
227 |
+
|
228 |
+
model_name = "Qwen/Qwen2-VL-7B-Instruct"
|
229 |
+
|
230 |
+
# Check torchvision availability
|
231 |
+
try:
|
232 |
+
import torchvision
|
233 |
+
logger.info(f"Torchvision version: {torchvision.__version__}")
|
234 |
+
except ImportError:
|
235 |
+
logger.error("Torchvision not available for Qwen model")
|
236 |
+
return False
|
237 |
+
|
238 |
+
processor = AutoProcessor.from_pretrained(model_name)
|
239 |
+
model = AutoModelForCausalLM.from_pretrained(
|
240 |
+
model_name,
|
241 |
+
torch_dtype='auto',
|
242 |
+
device_map="cpu"
|
243 |
)
|
244 |
+
|
245 |
+
_qwen_model = {
|
246 |
+
'processor': processor,
|
247 |
+
'model': model
|
248 |
+
}
|
249 |
+
|
250 |
+
logger.info("Qwen model loaded successfully")
|
251 |
+
return True
|
252 |
+
|
253 |
except Exception as e:
|
254 |
+
logger.error(f"Failed to load Qwen model: {e}")
|
255 |
+
return False
|
256 |
|
257 |
+
def segmentation_interface(image, prompt=None):
|
258 |
+
"""Main segmentation interface"""
|
259 |
+
global _mask_generator
|
260 |
+
|
261 |
+
if _mask_generator is None:
|
262 |
+
return fallback_segmentation(image, prompt)
|
263 |
+
|
264 |
+
try:
|
265 |
+
# Convert image if needed
|
266 |
+
if isinstance(image, str):
|
267 |
+
image = Image.open(image)
|
268 |
+
|
269 |
+
# Process with SAM-2
|
270 |
+
_mask_generator.set_image(np.array(image))
|
271 |
+
|
272 |
+
if prompt:
|
273 |
+
# Use prompt-based segmentation if available
|
274 |
+
masks, scores, _ = _mask_generator.predict(prompt)
|
275 |
+
else:
|
276 |
+
# Use automatic segmentation
|
277 |
+
masks, scores, _ = _mask_generator.predict()
|
278 |
+
|
279 |
+
return {
|
280 |
+
'masks': masks,
|
281 |
+
'scores': scores,
|
282 |
+
'message': 'Segmentation completed successfully'
|
283 |
+
}
|
284 |
+
|
285 |
+
except Exception as e:
|
286 |
+
logger.error(f"Segmentation failed: {e}")
|
287 |
+
return fallback_segmentation(image, prompt)
|
288 |
|
289 |
+
def chexagent_analysis(image, question="What do you see in this chest X-ray?"):
|
290 |
+
"""Analyze medical image with CheXagent"""
|
291 |
+
global _chexagent_model
|
292 |
+
|
293 |
+
if _chexagent_model is None:
|
294 |
+
return "CheXagent model not available. Please check the installation."
|
295 |
+
|
296 |
+
try:
|
297 |
+
# Process image and generate response
|
298 |
+
# This is a simplified example - adjust based on actual CheXagent API
|
299 |
+
return f"CheXagent analysis: {question} - Model loaded but needs proper implementation"
|
300 |
+
|
301 |
+
except Exception as e:
|
302 |
+
logger.error(f"CheXagent analysis failed: {e}")
|
303 |
+
return f"Analysis failed: {str(e)}"
|
304 |
|
305 |
+
def qwen_analysis(image, question="Describe this medical image"):
|
306 |
+
"""Analyze image with Qwen model"""
|
307 |
+
global _qwen_model
|
308 |
+
|
309 |
+
if _qwen_model is None:
|
310 |
+
return "Qwen model not available. Please check the installation."
|
311 |
+
|
312 |
+
try:
|
313 |
+
# Process image and generate response
|
314 |
+
# This is a simplified example - adjust based on actual Qwen API
|
315 |
+
return f"Qwen analysis: {question} - Model loaded but needs proper implementation"
|
316 |
+
|
317 |
+
except Exception as e:
|
318 |
+
logger.error(f"Qwen analysis failed: {e}")
|
319 |
+
return f"Analysis failed: {str(e)}"
|
320 |
|
|
|
|
|
|
|
321 |
def create_ui():
|
322 |
+
"""Create the Gradio interface"""
|
323 |
+
|
324 |
+
# Load models
|
325 |
+
logger.info("Loading models...")
|
326 |
+
sam2_available = load_sam2_model()
|
327 |
+
chexagent_available = load_chexagent_model()
|
328 |
+
qwen_available = load_qwen_model()
|
329 |
+
|
330 |
+
# Check dependencies
|
331 |
+
deps = check_dependencies()
|
332 |
+
|
333 |
+
# Status message
|
334 |
+
status_msg = f"""
|
335 |
+
Model Status:
|
336 |
+
- SAM-2 Segmentation: {'β
Available' if sam2_available else 'β Not available (using fallback)'}
|
337 |
+
- CheXagent: {'β
Available' if chexagent_available else 'β Not available'}
|
338 |
+
- Qwen VL: {'β
Available' if qwen_available else 'β Not available'}
|
339 |
+
|
340 |
+
Dependencies:
|
341 |
+
{' '.join([f"- {k}: {'β
' if v else 'β'}" for k, v in deps.items()])}
|
342 |
+
"""
|
343 |
+
|
344 |
+
# Create interface
|
345 |
+
with gr.Blocks(title="Medical Image Analysis Tool") as demo:
|
346 |
+
gr.Markdown("# Medical Image Analysis Tool")
|
347 |
+
gr.Markdown(status_msg)
|
348 |
+
|
349 |
+
with gr.Tab("Image Segmentation"):
|
350 |
+
with gr.Row():
|
351 |
+
with gr.Column():
|
352 |
+
seg_image = gr.Image(type="pil", label="Upload Image")
|
353 |
+
seg_prompt = gr.Textbox(label="Segmentation Prompt (optional)")
|
354 |
+
seg_button = gr.Button("Segment Image")
|
355 |
+
|
356 |
+
with gr.Column():
|
357 |
+
seg_output = gr.JSON(label="Segmentation Results")
|
358 |
+
|
359 |
+
seg_button.click(
|
360 |
+
fn=segmentation_interface,
|
361 |
+
inputs=[seg_image, seg_prompt],
|
362 |
+
outputs=seg_output
|
363 |
+
)
|
364 |
+
|
365 |
+
with gr.Tab("CheXagent Analysis"):
|
366 |
+
with gr.Row():
|
367 |
+
with gr.Column():
|
368 |
+
chex_image = gr.Image(type="pil", label="Upload Chest X-ray")
|
369 |
+
chex_question = gr.Textbox(
|
370 |
+
value="What do you see in this chest X-ray?",
|
371 |
+
label="Question"
|
372 |
+
)
|
373 |
+
chex_button = gr.Button("Analyze with CheXagent")
|
374 |
+
|
375 |
+
with gr.Column():
|
376 |
+
chex_output = gr.Textbox(label="Analysis Results")
|
377 |
+
|
378 |
+
chex_button.click(
|
379 |
+
fn=chexagent_analysis,
|
380 |
+
inputs=[chex_image, chex_question],
|
381 |
+
outputs=chex_output
|
382 |
+
)
|
383 |
+
|
384 |
+
with gr.Tab("Qwen VL Analysis"):
|
385 |
+
with gr.Row():
|
386 |
+
with gr.Column():
|
387 |
+
qwen_image = gr.Image(type="pil", label="Upload Medical Image")
|
388 |
+
qwen_question = gr.Textbox(
|
389 |
+
value="Describe this medical image",
|
390 |
+
label="Question"
|
391 |
+
)
|
392 |
+
qwen_button = gr.Button("Analyze with Qwen")
|
393 |
+
|
394 |
+
with gr.Column():
|
395 |
+
qwen_output = gr.Textbox(label="Analysis Results")
|
396 |
+
|
397 |
+
qwen_button.click(
|
398 |
+
fn=qwen_analysis,
|
399 |
+
inputs=[qwen_image, qwen_question],
|
400 |
+
outputs=qwen_output
|
401 |
+
)
|
402 |
+
|
403 |
+
with gr.Tab("System Information"):
|
404 |
+
gr.Markdown("### System Status")
|
405 |
+
gr.Markdown(status_msg)
|
406 |
+
|
407 |
+
def get_system_info():
|
408 |
+
import platform
|
409 |
+
info = f"""
|
410 |
+
Python Version: {sys.version}
|
411 |
+
Platform: {platform.platform()}
|
412 |
+
Working Directory: {os.getcwd()}
|
413 |
+
"""
|
414 |
+
return info
|
415 |
+
|
416 |
+
gr.Markdown(get_system_info())
|
417 |
+
|
418 |
return demo
|
419 |
|
420 |
if __name__ == "__main__":
|
421 |
+
try:
|
422 |
+
# Create and launch the UI
|
423 |
+
logger.info("Starting Medical Image Analysis Tool...")
|
424 |
+
ui = create_ui()
|
425 |
+
|
426 |
+
# Launch with error handling
|
427 |
+
ui.launch(
|
428 |
+
server_name="0.0.0.0",
|
429 |
+
server_port=7860,
|
430 |
+
share=False,
|
431 |
+
debug=True
|
432 |
+
)
|
433 |
+
|
434 |
+
except Exception as e:
|
435 |
+
logger.error(f"Failed to start application: {e}")
|
436 |
+
traceback.print_exc()
|
437 |
+
|
438 |
+
# Fallback: create minimal interface
|
439 |
+
logger.info("Creating minimal fallback interface...")
|
440 |
+
|
441 |
+
def minimal_interface():
|
442 |
+
return gr.Interface(
|
443 |
+
fn=lambda x: "Application running in minimal mode due to errors",
|
444 |
+
inputs=gr.Image(type="pil"),
|
445 |
+
outputs=gr.Textbox(),
|
446 |
+
title="Medical Image Analysis - Minimal Mode"
|
447 |
+
)
|
448 |
+
|
449 |
+
minimal_ui = minimal_interface()
|
450 |
+
minimal_ui.launch(
|
451 |
+
server_name="0.0.0.0",
|
452 |
+
server_port=7860,
|
453 |
+
share=False
|
454 |
+
)
|