import torch
import gradio as gr

# Load the YOLOv5 model (adjust the path to a model trained on blueprint
# classes such as 'wall' and 'foundation'; 'yolov5s.pt' is a placeholder)
model = torch.hub.load('ultralytics/yolov5', 'custom', path='yolov5s.pt')


# Calculate material quantities from the detected areas
def calculate_materials(detected_objects, blueprint_width, blueprint_height):
    materials = {
        "cement": 0,
        "bricks": 0,
        "steel": 0
    }

    # The proportionality factors below are simplified examples;
    # adjust them based on real-world data
    for obj in detected_objects:
        # Normalized bounding box coordinates (0-1 range)
        x1, y1, x2, y2 = obj['bbox']

        # Scale the normalized box to real-world units using the blueprint size
        width = (x2 - x1) * blueprint_width    # real-world width in cm
        height = (y2 - y1) * blueprint_height  # real-world height in cm

        # Area (width × height) in real-world units
        area = width * height  # cm²

        # Debugging output to verify the bounding box size
        print(f"Detected {obj['name']} with area {area} cm²")

        # Example material estimates per detected object type
        if obj['name'] == 'wall':
            materials['cement'] += area * 0.1   # cement estimate (kg)
            materials['bricks'] += area * 10    # brick count estimate
            materials['steel'] += area * 0.05   # steel estimate (kg)
        elif obj['name'] == 'foundation':
            materials['cement'] += area * 0.2   # more cement for foundations
            materials['bricks'] += area * 15    # more bricks for foundations
            materials['steel'] += area * 0.1    # more steel for foundations

    return materials


# Run inference on an input image and return the material estimates
def predict_image(image):
    results = model(image)

    # Get the detections as a pandas DataFrame with *normalized* xyxy
    # coordinates (xmin/ymin/xmax/ymax in the 0-1 range), so they can be
    # scaled directly to the blueprint's real-world dimensions
    detected_objects = results.pandas().xyxyn[0]  # first image in batch

    # Debugging output: print the detections for inspection
    print(f"Detected objects: {detected_objects}")

    # Keep only detections above the confidence threshold (0.5 = 50%)
    confidence_threshold = 0.5
    detected_objects = detected_objects[detected_objects['confidence'] > confidence_threshold]

    # Real-world blueprint size in cm (adjust to the actual drawing)
    blueprint_width = 91
    blueprint_height = 61

    # Collect the detections into a list of dicts for calculate_materials
    detected_objects_list = []
    for _, row in detected_objects.iterrows():
        detected_objects_list.append({
            'name': row['name'],  # detected class name (e.g., 'wall', 'foundation')
            'bbox': [row['xmin'], row['ymin'], row['xmax'], row['ymax']]  # normalized bbox
        })

    # Calculate materials based on the detected objects
    materials = calculate_materials(detected_objects_list, blueprint_width, blueprint_height)

    # Return the materials as a dictionary (rendered as JSON by Gradio)
    return materials


# Set up the Gradio interface: image input, JSON output
interface = gr.Interface(fn=predict_image, inputs=gr.Image(), outputs=gr.JSON())

# Launch the Gradio interface
if __name__ == "__main__":
    interface.launch()
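
# A quick offline sanity check for calculate_materials (a minimal sketch;
# the detections below are made-up normalized boxes, not real model output).
# Uncomment to exercise the estimation logic without loading the model or
# launching Gradio:
#
# fake_detections = [
#     {'name': 'wall', 'bbox': [0.10, 0.10, 0.50, 0.30]},        # hypothetical wall
#     {'name': 'foundation', 'bbox': [0.00, 0.80, 1.00, 1.00]},  # hypothetical foundation
# ]
# print(calculate_materials(fake_detections, 91, 61))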