yassonee committed on
Commit
78d26e0
·
verified ·
1 Parent(s): 550ba2f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +212 -44
app.py CHANGED
@@ -1,17 +1,26 @@
1
  import streamlit as st
 
 
 
2
  from transformers import pipeline
3
  import torch
4
  from PIL import Image, ImageDraw
5
  import io
6
  import base64
7
- from fastapi import FastAPI, File, UploadFile
8
- from fastapi.middleware.cors import CORSMiddleware
9
  import numpy as np
10
  import json
11
- from starlette.responses import JSONResponse
 
 
 
 
12
 
13
  # FastAPI app
14
- app = FastAPI()
 
 
 
 
15
 
16
  # Enable CORS
17
  app.add_middleware(
@@ -20,21 +29,46 @@ app.add_middleware(
20
  allow_credentials=True,
21
  allow_methods=["*"],
22
  allow_headers=["*"],
 
23
  )
24
 
25
- # Load models
26
  @st.cache_resource
27
  def load_models():
28
- return {
29
- "D3STRON": pipeline("object-detection", model="D3STRON/bone-fracture-detr"),
30
- "Heem2": pipeline("image-classification", model="Heem2/bone-fracture-detection-using-xray"),
31
- "Nandodeomkar": pipeline("image-classification",
32
- model="nandodeomkar/autotrain-fracture-detection-using-google-vit-base-patch-16-54382127388")
33
- }
 
 
 
 
 
 
 
34
 
35
- models = load_models()
 
 
 
 
 
 
36
 
37
  def draw_boxes(image, predictions, threshold=0.6):
 
 
 
 
 
 
 
 
 
 
 
38
  draw = ImageDraw.Draw(image)
39
  filtered_preds = [p for p in predictions if p['score'] >= threshold]
40
 
@@ -42,68 +76,202 @@ def draw_boxes(image, predictions, threshold=0.6):
42
  box = pred['box']
43
  label = f"{pred['label']} ({pred['score']:.2%})"
44
 
 
45
  draw.rectangle(
46
  [(box['xmin'], box['ymin']), (box['xmax'], box['ymax'])],
47
  outline="red",
48
  width=2
49
  )
50
 
51
- draw.text((box['xmin'], box['ymin']), label, fill="red")
 
 
 
 
 
52
 
53
  return image, filtered_preds
54
 
55
- # API Endpoint
56
- @app.post("/detect")
57
- async def detect_fracture(file: UploadFile = File(...), confidence: float = 0.6):
 
 
 
 
 
 
 
 
58
  try:
59
- # Read and process image
60
- contents = await file.read()
61
- image = Image.open(io.BytesIO(contents))
62
-
63
- # Get predictions from all models
64
- results = {}
65
-
66
- # Object detection models
67
  detection_preds = models["D3STRON"](image)
68
  result_image = image.copy()
69
- result_image, filtered_detections = draw_boxes(result_image, detection_preds, confidence)
 
 
 
 
70
 
71
- # Save result image
72
  img_byte_arr = io.BytesIO()
73
  result_image.save(img_byte_arr, format='PNG')
74
  img_byte_arr = img_byte_arr.getvalue()
75
  img_b64 = base64.b64encode(img_byte_arr).decode()
76
 
77
- # Classification models
78
- class_results = {
79
- "Heem2": models["Heem2"](image),
80
- "Nandodeomkar": models["Nandodeomkar"](image)
81
- }
 
 
 
 
 
82
 
83
- return JSONResponse({
 
 
 
 
 
 
 
 
84
  "success": True,
85
  "detections": filtered_detections,
86
  "classifications": class_results,
87
  "image": img_b64
88
- })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
  except Exception as e:
91
- return JSONResponse({
92
- "success": False,
93
- "error": str(e)
94
- })
 
 
 
 
95
 
96
  # Streamlit UI
97
  def main():
98
- st.title("🦴 Fraktur Detektion")
 
99
 
100
- # UI elements...
101
- uploaded_file = st.file_uploader("Röntgenbild hochladen", type=['png', 'jpg', 'jpeg'])
102
- confidence = st.slider("Konfidenzschwelle", 0.0, 1.0, 0.6, 0.05)
 
 
103
 
104
- if uploaded_file:
105
- # Process image and display results...
106
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
 
108
  if __name__ == "__main__":
109
  main()
 
1
  import streamlit as st
2
+ from fastapi import FastAPI, File, UploadFile, Form
3
+ from fastapi.middleware.cors import CORSMiddleware
4
+ from starlette.responses import JSONResponse
5
  from transformers import pipeline
6
  import torch
7
  from PIL import Image, ImageDraw
8
  import io
9
  import base64
 
 
10
  import numpy as np
11
  import json
12
import logging

# Configure logging for the whole module
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# FastAPI app
app = FastAPI(
    title="Fracture Detection API",
    description="API for detecting fractures in X-ray images using multiple ML models",
    version="1.0.0"
)

# Enable CORS so browser frontends hosted on other origins can call the API.
# NOTE(review): the middleware class and allow_origins argument were hidden
# diff context and are reconstructed as the conventional wildcard setup —
# confirm against the full file. Also note that allow_origins=["*"] combined
# with allow_credentials=True is rejected by browsers per the CORS spec;
# restrict origins for production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    expose_headers=["*"]
)
34
 
35
# Load models with caching
@st.cache_resource
def load_models():
    """Build and cache the three fracture-detection pipelines.

    Returns:
        dict: model nickname -> transformers pipeline. One object-detection
        model ("D3STRON") and two image classifiers ("Heem2", "Nandodeomkar").

    Raises:
        Exception: re-raised after logging if any pipeline fails to load.
    """
    logger.info("Loading ML models...")
    # Table of nickname -> (pipeline task, model repo id)
    model_specs = {
        "D3STRON": ("object-detection", "D3STRON/bone-fracture-detr"),
        "Heem2": ("image-classification", "Heem2/bone-fracture-detection-using-xray"),
        "Nandodeomkar": (
            "image-classification",
            "nandodeomkar/autotrain-fracture-detection-using-google-vit-base-patch-16-54382127388",
        ),
    }
    try:
        return {
            name: pipeline(task, model=repo)
            for name, (task, repo) in model_specs.items()
        }
    except Exception as e:
        logger.error(f"Error loading models: {str(e)}")
        raise
51
 
52
# Initialize models once at import time. On failure the module still imports,
# but `models` is None — downstream code must cope with that (or crash with a
# subscript error; see process_image).
try:
    models = load_models()
except Exception as e:
    logger.error(f"Failed to load models: {str(e)}")
    models = None
else:
    logger.info("Models loaded successfully")
59
 
60
def draw_boxes(image, predictions, threshold=0.6):
    """
    Draw bounding boxes and labels on the image for detected fractures.

    Args:
        image (PIL.Image): Input image (drawn on in place)
        predictions (list): Predictions from the object-detection pipeline;
            each item is a dict with 'score', 'label' and a 'box' dict of
            xmin/ymin/xmax/ymax pixel coordinates
        threshold (float): Confidence threshold for filtering predictions

    Returns:
        tuple: (annotated image, filtered predictions)
    """
    draw = ImageDraw.Draw(image)
    filtered_preds = [p for p in predictions if p['score'] >= threshold]

    for pred in filtered_preds:
        box = pred['box']
        label = f"{pred['label']} ({pred['score']:.2%})"

        # Draw bounding box
        draw.rectangle(
            [(box['xmin'], box['ymin']), (box['xmax'], box['ymax'])],
            outline="red",
            width=2
        )

        # Draw label just above the box; clamp y to 0 so the text is not
        # positioned off-canvas when a box touches the top edge (previously
        # ymin - 10 could be negative).
        draw.text(
            (box['xmin'], max(0, box['ymin'] - 10)),
            label,
            fill="red"
        )

    return image, filtered_preds
94
 
95
def process_image(image, confidence_threshold):
    """
    Process an image through all models and return combined results.

    Args:
        image (PIL.Image): Input image
        confidence_threshold (float): Confidence threshold for filtering predictions

    Returns:
        dict: Combined results with keys:
            success (bool), detections (list of filtered detection dicts),
            classifications (dict of model name -> predictions or {"error": ...}),
            image (str, base64-encoded PNG annotated with bounding boxes)

    Raises:
        Exception: re-raised after logging if detection or image encoding fails.
    """
    try:
        # Fail fast with a clear message when model loading failed at import
        # time (previously this surfaced as an opaque TypeError on subscript).
        if models is None:
            raise RuntimeError("Models are not loaded")

        # Object detection on a copy so the caller's image is untouched
        detection_preds = models["D3STRON"](image)
        result_image = image.copy()
        result_image, filtered_detections = draw_boxes(
            result_image,
            detection_preds,
            confidence_threshold
        )

        # Save annotated image as a base64-encoded PNG
        img_byte_arr = io.BytesIO()
        result_image.save(img_byte_arr, format='PNG')
        img_b64 = base64.b64encode(img_byte_arr.getvalue()).decode()

        # Classification results. Run each classifier independently so one
        # failing model does not discard the other's output (this replaces
        # two copy-pasted try/except blocks with a single loop; the logged
        # and returned error strings are unchanged).
        class_results = {}
        for name in ("Heem2", "Nandodeomkar"):
            try:
                class_results[name] = models[name](image)
            except Exception as e:
                logger.error(f"Error in {name} model: {str(e)}")
                class_results[name] = {"error": str(e)}

        return {
            "success": True,
            "detections": filtered_detections,
            "classifications": class_results,
            "image": img_b64
        }

    except Exception as e:
        logger.error(f"Error processing image: {str(e)}")
        raise
151
+
152
# API Endpoints
@app.post("/detect")
@app.post("/api/predict")
async def detect_fracture(
    file: UploadFile = File(...),
    confidence: float = Form(default=0.6)
):
    """
    Endpoint for fracture detection in X-ray images.

    Args:
        file (UploadFile): Uploaded image file
        confidence (float): Confidence threshold for predictions

    Returns:
        JSONResponse: Detection results including annotated image; on failure,
        a {"success": False, "error": ...} payload with status 400 or 500.
    """
    logger.info(f"Received request with confidence threshold: {confidence}")

    def error_response(status_code, message):
        # Uniform error payload shared by every failure path below.
        return JSONResponse(
            status_code=status_code,
            content={"success": False, "error": message},
        )

    try:
        # Guard clause: reject an out-of-range threshold up front.
        if not 0 <= confidence <= 1:
            return error_response(
                400, "Confidence threshold must be between 0 and 1"
            )

        # Read and validate the uploaded image.
        contents = await file.read()
        try:
            image = Image.open(io.BytesIO(contents))
        except Exception as e:
            return error_response(400, f"Invalid image file: {str(e)}")

        # Run the full detection + classification pipeline.
        try:
            results = process_image(image, confidence)
        except Exception as e:
            logger.error(f"Error processing image: {str(e)}")
            return error_response(500, f"Error processing image: {str(e)}")

        logger.info("Image processed successfully")
        return JSONResponse(content=results)

    except Exception as e:
        # Catch-all boundary so the endpoint always answers with JSON.
        logger.error(f"Unexpected error: {str(e)}")
        return error_response(500, f"Unexpected error: {str(e)}")
220
 
221
# Streamlit UI
def main():
    """Streamlit front end: upload an X-ray, pick a threshold, show results."""
    st.title("🦴 Fracture Detection System")
    st.write("Upload an X-ray image to detect potential fractures")

    # File uploader
    uploaded_file = st.file_uploader(
        "Upload X-ray image",
        type=['png', 'jpg', 'jpeg']
    )

    # Confidence threshold slider
    confidence = st.slider(
        "Confidence Threshold",
        min_value=0.0,
        max_value=1.0,
        value=0.6,
        step=0.05
    )

    # Guard clause: nothing more to do until a file is provided.
    if uploaded_file is None:
        return

    # Display original image
    image = Image.open(uploaded_file)
    st.image(image, caption="Original Image", use_column_width=True)

    if not st.button("Analyze Image"):
        return

    try:
        results = process_image(image, confidence)

        if not results["success"]:
            st.error("Analysis failed. Please try again.")
            return

        st.success("Analysis completed successfully!")

        # Show annotated image (decode the base64 PNG produced upstream)
        annotated_image = Image.open(
            io.BytesIO(base64.b64decode(results["image"]))
        )
        st.image(
            annotated_image,
            caption="Detected Fractures",
            use_column_width=True
        )

        # Show detections
        if results["detections"]:
            st.subheader("Detected Fractures")
            for det in results["detections"]:
                st.write(f"- {det['label']}: {det['score']:.2%} confidence")

        # Show classifications
        st.subheader("Classification Results")
        for model, preds in results["classifications"].items():
            st.write(f"**{model} Model:**")
            st.json(preds)

    except Exception as e:
        st.error(f"Error during analysis: {str(e)}")

if __name__ == "__main__":
    main()