from flask import Flask, request, jsonify, render_template
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import Visualizer, ColorMode
import numpy as np
from PIL import Image
import io
import os
import requests
import gdown
from skimage import io as skio
from torchvision.ops import box_iou
import torch
# Initialize Flask app
app = Flask(__name__)
cfg = None
# Google Drive file URL
GDRIVE_MODEL_URL = "https://drive.google.com/uc?id=18aEDo-kWOBhg8mAhnbpFkuM6bmmrBH4E"  # Direct-download URL built from the Drive file ID
LOCAL_MODEL_PATH = "model_final.pth"
def download_file_from_google_drive(file_id, destination):
    # gdown handles Google Drive's large-file confirmation step that a plain HTTP GET skips.
    gdown.download(f"https://drive.google.com/uc?id={file_id}", destination, quiet=False)


file_id = "18aEDo-kWOBhg8mAhnbpFkuM6bmmrBH4E"
destination = "model_final.pth"
if not os.path.exists(destination):
    download_file_from_google_drive(file_id, destination)
# Fallback: download the model over plain HTTP if not already present locally.
# (Unused at startup; gdown above is preferred because Drive may interpose a
# confirmation page for large files that a bare requests.get does not handle.)
def download_model():
    if not os.path.exists(LOCAL_MODEL_PATH):
        response = requests.get(GDRIVE_MODEL_URL, stream=True)
        if response.status_code == 200:
            with open(LOCAL_MODEL_PATH, "wb") as f:
                f.write(response.content)
        else:
            raise Exception(
                f"Failed to download model from Google Drive: {response.status_code}"
            )
# Configuration and model setup
def setup_model(model_path):
    global cfg
    cfg = get_cfg()
    cfg.merge_from_file("config.yaml")  # Update with the config file path
    cfg.MODEL.WEIGHTS = model_path
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.MODEL.DEVICE = "cpu"  # Use "cuda" for GPU
    return DefaultPredictor(cfg)
# Ensure model is available
predictor = setup_model(LOCAL_MODEL_PATH)
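# The predictor is created once at startup and reused by both inference routes below.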
# Define expected parts and costs
expected_parts = ["headlamp", "rear_bumper", "door", "hood", "front_bumper"]
cost_dict = {
    "headlamp": 300,
    "rear_bumper": 250,
    "door": 200,
    "hood": 220,
    "front_bumper": 250,
    "other": 150,
}
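# Any detected class outside expected_parts is billed at the flat "other" rate.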
@app.route("/")
def home():
    return render_template("index.html")
@app.route("/upload", methods=["POST"])
def upload():
    if "file" not in request.files:
        return jsonify({"error": "No file uploaded"}), 400
    file = request.files["file"]
    if file.filename == "":
        return jsonify({"error": "No file selected"}), 400
    # Load image; skimage returns RGB, while DefaultPredictor expects BGR unless
    # config.yaml overrides INPUT.FORMAT
    image = skio.imread(file)
    if image.ndim == 3 and image.shape[2] == 4:
        image = image[:, :, :3]  # Drop the alpha channel if present
    image_np = np.ascontiguousarray(image[:, :, ::-1]) if image.ndim == 3 else image
    # Run model prediction
    outputs = predictor(image_np)
    instances = outputs["instances"].to("cpu")
    class_names = MetadataCatalog.get(cfg.DATASETS.TEST[0]).thing_classes
    # Extract bounding boxes and class IDs
    boxes = instances.pred_boxes.tensor.numpy()
    class_ids = instances.pred_classes.numpy()
    # Filter overlapping boxes using IoU: keep the first box and drop any later
    # box that overlaps it above the threshold
    iou_threshold = 0.8
    keep_indices = []
    merged_boxes = set()
    for i in range(len(boxes)):
        if i in merged_boxes:
            continue
        keep_indices.append(i)
        for j in range(i + 1, len(boxes)):
            if j in merged_boxes:
                continue
            iou = box_iou(
                torch.tensor(boxes[i]).unsqueeze(0), torch.tensor(boxes[j]).unsqueeze(0)
            ).item()
            if iou > iou_threshold:
                merged_boxes.add(j)
    # Calculate total cost based on non-overlapping boxes
    total_cost = 0
    damage_details = []
    for idx in keep_indices:
        class_id = class_ids[idx]
        damaged_part = (
            class_names[class_id] if class_id < len(class_names) else "unknown"
        )
        if damaged_part not in expected_parts:
            damaged_part = "other"
        repair_cost = cost_dict.get(damaged_part, cost_dict["other"])
        total_cost += repair_cost
        damage_details.append({"part": damaged_part, "cost_usd": repair_cost})
    response = {"damages": damage_details, "total_cost": total_cost}
    return jsonify(response)
@app.route("/fetch-image", methods=["POST"])
def fetchImage():
    file = None
    if "url" in request.form:
        url = request.form["url"]
        response = requests.get(url)
        file = io.BytesIO(response.content)
    elif "file" in request.files:
        file = request.files["file"]
    if file is None:
        return jsonify({"error": "No image URL or file provided"}), 400
    # Load image with skimage (not the standard-library io module), then convert
    # RGB -> BGR as in /upload
    image = skio.imread(file)
    if image.ndim == 3 and image.shape[2] == 4:
        image = image[:, :, :3]  # Drop the alpha channel if present
    image_np = np.ascontiguousarray(image[:, :, ::-1]) if image.ndim == 3 else image
    # Run model prediction
    outputs = predictor(image_np)
    instances = outputs["instances"].to("cpu")
    class_names = MetadataCatalog.get(cfg.DATASETS.TEST[0]).thing_classes
    # Extract bounding boxes and class IDs
    boxes = instances.pred_boxes.tensor.numpy()
    class_ids = instances.pred_classes.numpy()
    # Filter overlapping boxes using IoU
    iou_threshold = 0.8
    keep_indices = []
    merged_boxes = set()
    for i in range(len(boxes)):
        if i in merged_boxes:
            continue
        keep_indices.append(i)
        for j in range(i + 1, len(boxes)):
            if j in merged_boxes:
                continue
            iou = box_iou(
                torch.tensor(boxes[i]).unsqueeze(0), torch.tensor(boxes[j]).unsqueeze(0)
            ).item()
            if iou > iou_threshold:
                merged_boxes.add(j)
    # Calculate total cost based on non-overlapping boxes
    total_cost = 0
    damage_details = []
    for idx in keep_indices:
        class_id = class_ids[idx]
        damaged_part = (
            class_names[class_id] if class_id < len(class_names) else "unknown"
        )
        if damaged_part not in expected_parts:
            damaged_part = "other"
        repair_cost = cost_dict.get(damaged_part, cost_dict["other"])
        total_cost += repair_cost
        damage_details.append({"part": damaged_part, "cost_usd": repair_cost})
    response = {"damages": damage_details, "total_cost": total_cost}
    return jsonify(response)
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)
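# Minimal client sketch (not part of the app): posts a local image to /upload and
# prints the JSON cost estimate. Assumes the server above is running on
# localhost:7860 and that "car.jpg" is a placeholder image path.
#
#   import requests
#   with open("car.jpg", "rb") as f:
#       resp = requests.post("http://localhost:7860/upload", files={"file": f})
#   print(resp.json())  # e.g. {"damages": [{"part": "door", "cost_usd": 200}], "total_cost": 200}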