import torch
import gradio as gr
from PIL import Image
import numpy as np

# Load the YOLOv5 model (adjust the path to your custom weights if needed)
model = torch.hub.load('ultralytics/yolov5', 'custom', path='yolov5s.pt')

# Example function to calculate materials based on detected areas
def calculate_materials(detected_objects, image_width, image_height):
    materials = {
        "cement": 0,
        "bricks": 0,
        "steel": 0
    }

    # Proportionality factors are simplified for this example;
    # adjust them based on real-world data.
    for obj in detected_objects:
        # Bounding box coordinates are normalized (0-1), so scaling them by
        # the blueprint's real-world dimensions converts them to cm.
        x1, y1, x2, y2 = obj['bbox']
        width = (x2 - x1) * image_width    # Real-world width in cm
        height = (y2 - y1) * image_height  # Real-world height in cm

        # Simplified area calculation (length × width)
        area = width * height
        print(f"Detected {obj['name']} with area {area} cm²")  # Debugging output

        if obj['name'] == 'wall':  # Example: 'wall' objects
            materials['cement'] += area * 0.1   # Cement estimate (kg)
            materials['bricks'] += area * 10    # Brick estimate (count)
            materials['steel'] += area * 0.05   # Steel estimate (kg)
        elif obj['name'] == 'foundation':  # Example: 'foundation' objects
            materials['cement'] += area * 0.2   # More cement for the foundation
            materials['bricks'] += area * 15    # More bricks for the foundation
            materials['steel'] += area * 0.1    # More steel for the foundation

    return materials

# Define the function for image inference
def predict_image(image):
    # Run inference on the input image
    results = model(image)

    # Get the detections as a pandas DataFrame with *normalized* xyxy
    # coordinates (xmin/ymin/xmax/ymax in the 0-1 range), so they can be
    # scaled to the blueprint's real-world size in calculate_materials.
    detected_objects = results.pandas().xyxyn[0]  # First image in batch

    # Print the detection results for debugging purposes
    print(f"Detected objects: {detected_objects}")

    # Drop low-confidence detections (e.g., 0.5 means 50% confidence)
    confidence_threshold = 0.5
    detected_objects = detected_objects[detected_objects['confidence'] > confidence_threshold]

    # Assumed blueprint size in cm (adjust to the real-world drawing size)
    image_width = 91   # Example width in cm
    image_height = 61  # Example height in cm

    # Collect the detections into a simple list of dicts
    detected_objects_list = []
    for _, row in detected_objects.iterrows():
        detected_objects_list.append({
            'name': row['name'],  # Detected class name (e.g., 'wall', 'foundation')
            'bbox': [row['xmin'], row['ymin'], row['xmax'], row['ymax']]  # Normalized bbox
        })

    # Calculate materials based on the detected objects
    materials = calculate_materials(detected_objects_list, image_width, image_height)

    # Return the materials as a dictionary
    return materials

# Set up the Gradio interface for image input and JSON output
interface = gr.Interface(fn=predict_image, inputs=gr.Image(), outputs=gr.JSON())

# Launch the Gradio interface
if __name__ == "__main__":
    interface.launch()
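
# Quick sanity check for the estimation math (a sketch, independent of the
# model; the 'wall' detection and its bounding box below are fabricated).
# Since interface.launch() blocks, run this from a REPL after importing the
# module. A wall covering the upper-left quarter of the 91 cm × 61 cm
# blueprint has width 0.5 * 91 = 45.5 cm and height 0.5 * 61 = 30.5 cm:
#
# >>> fake_detections = [{'name': 'wall', 'bbox': [0.0, 0.0, 0.5, 0.5]}]
# >>> calculate_materials(fake_detections, 91, 61)
# Detected wall with area 1387.75 cm²
# {'cement': 138.775, 'bricks': 13877.5, 'steel': 69.3875}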