Spaces: Build error

Update app.py

app.py CHANGED
@@ -1,12 +1,14 @@
 import torch
-import
+from transformers import AutoFeatureExtractor, AutoModelForObjectDetection
 from PIL import Image
+import gradio as gr
 import numpy as np
 
-# Load the
-model =
+# Load the pre-trained DETR model
+model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50")
+extractor = AutoFeatureExtractor.from_pretrained("facebook/detr-resnet-50")
 
-#
+# Function to calculate materials based on detected areas (example: walls and foundations)
 def calculate_materials(detected_objects, image_width, image_height):
     materials = {
         "cement": 0,
@@ -14,63 +16,62 @@ def calculate_materials(detected_objects, image_width, image_height):
         "steel": 0
     }
 
-    # Proportionality factors (simplified
+    # Proportionality factors (simplified, adjust based on real-world data)
     for obj in detected_objects:
-        #
+        # Bounding box coordinates in the format [xmin, ymin, xmax, ymax]
         x1, y1, x2, y2 = obj['bbox']
-
-        # Convert the bounding box coordinates to real-world units based on image size and blueprint size
-        width = (x2 - x1) * image_width  # Convert to real-world width (in cm or meters)
-        height = (y2 - y1) * image_height  # Convert to real-world height (in cm or meters)
 
-        # Calculate
-
-
-
-
+        # Calculate real-world dimensions (assuming you have a known scale for your blueprint)
+        width = (x2 - x1) * image_width  # Convert to real-world width
+        height = (y2 - y1) * image_height  # Convert to real-world height
+
+        # Calculate area
+        area = width * height  # cm² or m² depending on your scale
 
-        #
+        # Print area for debugging
+        print(f"Detected {obj['name']} with area {area} cm²")
+
+        # Material estimation based on the object name
         if obj['name'] == 'wall':  # Example: For 'wall' objects
             materials['cement'] += area * 0.1  # Cement estimation (in kg)
             materials['bricks'] += area * 10  # Bricks estimation
             materials['steel'] += area * 0.05  # Steel estimation
 
         elif obj['name'] == 'foundation':  # Example: For 'foundation' objects
-            materials['cement'] += area * 0.2
-            materials['bricks'] += area * 15
-            materials['steel'] += area * 0.1
+            materials['cement'] += area * 0.2
+            materials['bricks'] += area * 15
+            materials['steel'] += area * 0.1
 
     return materials
 
 # Define the function for image inference
 def predict_image(image):
-    #
-
-
-    #
-
-
-    # Debugging output: Print out the detected objects for inspection
-    print(f"Detected objects: {detected_objects}")
-
-    # Set the confidence threshold (e.g., 0.5 means 50% confidence)
-    confidence_threshold = 0.5
-    detected_objects = detected_objects[detected_objects['confidence'] > confidence_threshold]
+    # Convert image to the required format for the model
+    inputs = extractor(images=image, return_tensors="pt")
+
+    # Run inference with DETR
+    outputs = model(**inputs)
 
-    #
-
-
+    # Get the predictions from the output (boxes and labels)
+    target_sizes = torch.tensor([image.size[::-1]])  # height, width
+    results = extractor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.5)[0]
+
+    detected_objects = []
 
-    # Process the detected objects and
-
-
-
-            'name':
-            'bbox': [
+    # Process the detected objects and extract bounding boxes and class names
+    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+        box = box.tolist()  # Convert box to list
+        detected_objects.append({
+            'name': label.item(),  # Get the class name
+            'bbox': box  # Bounding box [xmin, ymin, xmax, ymax]
         })
+
+    # Assume blueprint image size (adjust this based on your image scale)
+    image_width = image.size[0]  # Image width in pixels
+    image_height = image.size[1]  # Image height in pixels
 
-    # Calculate materials based on detected objects
-    materials = calculate_materials(
+    # Calculate materials based on the detected objects
+    materials = calculate_materials(detected_objects, image_width, image_height)
 
     # Return the materials as a dictionary
     return materials
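A note on the new detection loop: `results["labels"]` holds integer class ids, so `label.item()` stores a number under `'name'`, and the string comparisons in `calculate_materials` (`'wall'`, `'foundation'`) can never match. A minimal sketch of the id-to-name lookup via `model.config.id2label`, assuming a checkpoint whose label set actually contains such classes; the stock facebook/detr-resnet-50 is trained on COCO, which has no 'wall' or 'foundation' label:

    # Sketch: resolve DETR label ids to readable names so calculate_materials
    # can match on strings. Assumes a checkpoint fine-tuned on blueprint
    # classes; the stock COCO label set has no 'wall' or 'foundation'.
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        detected_objects.append({
            'name': model.config.id2label[label.item()],  # e.g. 'wall' instead of an id like 17
            'bbox': box.tolist()
        })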
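Separately, because `target_sizes` is passed to `post_process_object_detection`, the returned boxes are already in absolute pixel coordinates, so multiplying `(x2 - x1)` by `image_width` inside `calculate_materials` scales the dimensions a second time. A sketch of a direct pixel-to-area conversion, where `CM_PER_PIXEL` is a hypothetical calibration constant for the blueprint's scale, not something the model provides:

    CM_PER_PIXEL = 0.5  # hypothetical: one pixel spans 0.5 cm on the blueprint

    def box_area_cm2(bbox):
        # bbox is [xmin, ymin, xmax, ymax] in pixels, as returned above
        x1, y1, x2, y2 = bbox
        return ((x2 - x1) * CM_PER_PIXEL) * ((y2 - y1) * CM_PER_PIXEL)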
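Finally, the commit adds `import gradio as gr`, but the file as shown never constructs an interface, so the Space has no UI entry point. A minimal sketch of wiring `predict_image` into a Gradio app; the component choices and labels below are assumptions, not part of the commit:

    # Sketch: expose predict_image through a Gradio interface.
    demo = gr.Interface(
        fn=predict_image,
        inputs=gr.Image(type="pil"),                   # predict_image expects a PIL image
        outputs=gr.JSON(label="Estimated materials"),  # materials dict from calculate_materials
        title="Blueprint material estimator",          # hypothetical title
    )

    demo.launch()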