import torch
import gradio as gr

# Load the YOLOv5 model via torch.hub. Note that 'yolov5s.pt' is the stock
# COCO checkpoint; detecting blueprint classes such as 'wall' and 'foundation'
# requires custom-trained weights (point `path` at them).
model = torch.hub.load('ultralytics/yolov5', 'custom', path='yolov5s.pt')
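
# Quick-test alternative (assumption: no custom weights trained yet). The
# stock pretrained model detects COCO classes only, so 'wall'/'foundation'
# will never appear, but it lets you exercise the rest of the pipeline:
#
#     model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)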

# Example function to estimate material quantities from detected areas.
# Expects bounding boxes in normalized [xmin, ymin, xmax, ymax] form (0-1),
# so scaling by the blueprint's physical dimensions yields areas in cm².
def calculate_materials(detected_objects, image_width, image_height):
    materials = {
        "cement": 0,
        "bricks": 0,
        "steel": 0
    }

    # The per-area multipliers below are simplified placeholders; calibrate
    # them against real construction data before trusting the estimates.
    for obj in detected_objects:
        x1, y1, x2, y2 = obj['bbox']  # Normalized corners [xmin, ymin, xmax, ymax] in [0, 1]
        width = (x2 - x1) * image_width    # Real-world width in cm
        height = (y2 - y1) * image_height  # Real-world height in cm

        # Bounding-box area as a rough proxy for the element's footprint
        area = width * height

        print(f"Detected {obj['name']} with area {area:.1f} cm²")  # Debugging output
        
        if obj['name'] == 'wall':  # Example: For 'wall' objects
            materials['cement'] += area * 0.1  # Cement estimation (in kg)
            materials['bricks'] += area * 10  # Bricks estimation
            materials['steel'] += area * 0.05  # Steel estimation
        
        elif obj['name'] == 'foundation':  # Example: For 'foundation' objects
            materials['cement'] += area * 0.2  # More cement for foundation
            materials['bricks'] += area * 15  # More bricks for foundation
            materials['steel'] += area * 0.1  # More steel for foundation

    return materials
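
# Worked example with hypothetical numbers: a single 'wall' detection whose
# normalized bbox [0.0, 0.0, 0.5, 1.0] covers half of a 91 cm x 61 cm sheet:
#
#     calculate_materials([{'name': 'wall', 'bbox': [0.0, 0.0, 0.5, 1.0]}], 91, 61)
#     # -> {'cement': 277.55, 'bricks': 27755.0, 'steel': 138.775}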

# Define the function for image inference
def predict_image(image):
    # Run inference on the input image
    results = model(image)
    
    # Get the detections as a pandas DataFrame in normalized xyxy format
    # (columns: xmin, ymin, xmax, ymax in [0, 1], plus confidence, class, name)
    detected_objects = results.pandas().xyxyn[0]  # First image in batch
    
    # Print out the detection results for debugging purposes
    print(f"Detected objects: {detected_objects}")
    
    # Set the confidence threshold (e.g., 0.5 means 50% confidence)
    confidence_threshold = 0.5
    detected_objects = detected_objects[detected_objects['confidence'] > confidence_threshold]
    
    # Physical blueprint dimensions in cm (adjust to match the real sheet)
    image_width = 91   # Example width in cm
    image_height = 61  # Example height in cm
    
    # Process the detected objects and calculate materials
    detected_objects_list = []
    for _, row in detected_objects.iterrows():
        detected_objects_list.append({
            'name': row['name'],  # Detected object class name (e.g., 'wall', 'foundation')
            'bbox': [row['xmin'], row['ymin'], row['xmax'], row['ymax']]  # Normalized corners in [0, 1]
        })
    
    # Calculate materials based on detected objects
    materials = calculate_materials(detected_objects_list, image_width, image_height)
    
    # Return the materials as a dictionary
    return materials

# Set up the Gradio interface: image in, JSON out. gr.Image() hands the
# upload to predict_image as a numpy array, which the hub model accepts.
interface = gr.Interface(fn=predict_image, inputs=gr.Image(), outputs=gr.JSON())

# Launch the Gradio interface
if __name__ == "__main__":
    interface.launch()
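
# Headless sanity check (assumes a local file, e.g. 'blueprint.jpg');
# bypasses the Gradio UI. Uncomment to run a one-off inference:
#
#     from PIL import Image
#     print(predict_image(Image.open('blueprint.jpg')))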