import torch
import gradio as gr
from PIL import Image
from pdf2image import convert_from_path
import numpy as np
from transformers import AutoFeatureExtractor, AutoModelForObjectDetection
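
# Assumed supporting files for this Space (a sketch, not part of the original listing):
#   requirements.txt: torch, gradio, pillow, pdf2image, numpy, transformers, timm
#   packages.txt:     poppler-utils   (system package that pdf2image needs to read PDFs)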

# Load the pre-trained DETR model and feature extractor
model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50")
extractor = AutoFeatureExtractor.from_pretrained("facebook/detr-resnet-50")

# Simplified COCO label mapping (in DETR's id2label, 62 is "chair" and 67 is "dining table")
COCO_LABELS = {62: "wall", 67: "foundation"}  # Pretending chair (62) is "wall", dining table (67) is "foundation"


# Function to calculate materials based on detected areas
def calculate_materials(detected_objects, image_width, image_height):
    materials = {
        "cement": 0,
        "bricks": 0,
        "steel": 0
    }
    # Boxes from post_process_object_detection are already in absolute pixels,
    # so no extra scaling by the image size is needed; image_width/image_height
    # are kept only as hooks for a real-world scale factor.
    for obj in detected_objects:
        x1, y1, x2, y2 = obj['bbox']
        width = x2 - x1
        height = y2 - y1
        area = width * height  # treated as cm² (simplified pixel-to-cm assumption)
        print(f"Detected {obj['name']} with area {area:.2f} cm²")
        if obj['name'] == 'wall':
            materials['cement'] += area * 0.1   # kg
            materials['bricks'] += area * 10    # units
            materials['steel'] += area * 0.05   # kg
        elif obj['name'] == 'foundation':
            materials['cement'] += area * 0.2
            materials['bricks'] += area * 15
            materials['steel'] += area * 0.1
    return materials
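
# Illustrative call with made-up numbers: a single 100 x 200 px box labelled "wall"
#   calculate_materials([{'name': 'wall', 'bbox': [0, 0, 100, 200]}], 800, 600)
# prints an area of 20000.00 cm² and returns
#   {'cement': 2000.0, 'bricks': 200000, 'steel': 1000.0}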


# Function to process PDFs and convert them to images
def pdf_to_image(pdf_file):
    images = convert_from_path(pdf_file, first_page=1, last_page=1)  # Convert only the first page of the PDF
    return images[0]  # Return the first page as a PIL image


# Define the function for image inference
def predict_image(file):
    # gr.File may return either a filepath string or a temp-file object,
    # depending on the Gradio version, so normalise to a path first
    file_path = file if isinstance(file, str) else file.name
    if file_path.lower().endswith('.pdf'):
        image = pdf_to_image(file_path)  # Convert PDF to image
    else:
        image = Image.open(file_path).convert("RGB")  # Open the image file
    # Convert the image to the correct format for the model
    inputs = extractor(images=image, return_tensors="pt")
    # Run inference with DETR (no gradients needed at inference time)
    with torch.no_grad():
        outputs = model(**inputs)
    # Post-process the outputs into absolute pixel boxes
    target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
    results = extractor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.5)[0]
    detected_objects = []
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = box.tolist()
        label_id = label.item()
        label_name = COCO_LABELS.get(label_id, "unknown")
        if label_name != "unknown":  # Only process relevant objects
            detected_objects.append({
                'name': label_name,
                'bbox': box
            })
    # Calculate materials
    image_width, image_height = image.size
    materials = calculate_materials(detected_objects, image_width, image_height)
    # Format the output for better readability
    formatted_materials = {
        "cement": f"{materials['cement']:.2f} kg",
        "bricks": f"{materials['bricks']:.0f} units",
        "steel": f"{materials['steel']:.2f} kg"
    }
    return formatted_materials
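
# The Gradio JSON output therefore looks like (values illustrative only):
#   {"cement": "2000.00 kg", "bricks": "200000 units", "steel": "1000.00 kg"}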


# Set up the Gradio interface
interface = gr.Interface(
    fn=predict_image,
    inputs=gr.File(label="Upload Blueprint (PDF or Image)"),
    outputs=gr.JSON(label="Material Estimates"),
    title="Blueprint Material Estimator",
    description="Upload a blueprint image or PDF to estimate construction materials."
)

# Launch the interface
if __name__ == "__main__":
    interface.launch(share=False)
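
# Assuming the file is saved as app.py (the usual Spaces entry point), it can also be
# run locally with `python app.py`; Gradio prints a local URL where a blueprint PDF
# or image can be uploaded for a quick end-to-end check.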