import torch
import gradio as gr
from PIL import Image
from pdf2image import convert_from_path
from transformers import AutoFeatureExtractor, AutoModelForObjectDetection
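
# Note: pdf2image requires the poppler utilities to be installed on the system.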

# Load the pre-trained DETR model and feature extractor
model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50")
extractor = AutoFeatureExtractor.from_pretrained("facebook/detr-resnet-50")

# Simplified label mapping: re-use two COCO classes from the model's own id2label as stand-ins,
# pretending "chair" detections are walls and "dining table" detections are foundations.
_name_to_id = {name: int(idx) for idx, name in model.config.id2label.items()}
COCO_LABELS = {_name_to_id["chair"]: "wall", _name_to_id["dining table"]: "foundation"}

# Function to estimate materials from the detected element areas
# (the per-area coefficients below are illustrative placeholders, not engineering values)
def calculate_materials(detected_objects):
    materials = {
        "cement": 0,
        "bricks": 0,
        "steel": 0
    }

    for obj in detected_objects:
        x1, y1, x2, y2 = obj['bbox']
        # Boxes from post_process_object_detection are already in absolute pixel coordinates
        width = x2 - x1
        height = y2 - y1
        area = width * height  # px², treated as cm² under an assumed drawing scale

        print(f"Detected {obj['name']} with area {area:.2f} cm²")

        if obj['name'] == 'wall':
            materials['cement'] += area * 0.1  # kg
            materials['bricks'] += area * 10  # units
            materials['steel'] += area * 0.05  # kg
        elif obj['name'] == 'foundation':
            materials['cement'] += area * 0.2
            materials['bricks'] += area * 15
            materials['steel'] += area * 0.1

    return materials

# Function to process PDFs and convert to images
def pdf_to_image(pdf_file):
    images = convert_from_path(pdf_file, first_page=1, last_page=1)  # Convert the first page of the PDF
    return images[0]  # Return the first page as an image
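# Note: only the first page is converted here; calling convert_from_path without
# first_page/last_page would return every page for multi-page blueprints.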

# Define the function that runs inference on an uploaded blueprint
def predict_image(file):
    # Gradio may pass either a filepath string or a file-like object; normalize to a path
    path = file if isinstance(file, str) else file.name
    if path.lower().endswith('.pdf'):
        image = pdf_to_image(path)  # Convert the first page of the PDF to an image
    else:
        image = Image.open(path).convert("RGB")  # Open the image file and force RGB

    # Convert the image to the correct format for the model
    inputs = extractor(images=image, return_tensors="pt")

    # Run inference with DETR (no gradients are needed at inference time)
    with torch.no_grad():
        outputs = model(**inputs)

    # Post-process the outputs
    target_sizes = torch.tensor([image.size[::-1]])  # height, width
    results = extractor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.5)[0]

    detected_objects = []
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = box.tolist()
        label_id = label.item()
        label_name = COCO_LABELS.get(label_id, "unknown")
        if label_name != "unknown":  # Only process relevant objects
            detected_objects.append({
                'name': label_name,
                'bbox': box
            })

    # Calculate materials from the detected objects (boxes are already in pixel coordinates)
    materials = calculate_materials(detected_objects)

    # Format the output for better readability
    formatted_materials = {
        "cement": f"{materials['cement']:.2f} kg",
        "bricks": f"{materials['bricks']:.0f} units",
        "steel": f"{materials['steel']:.2f} kg"
    }

    return formatted_materials
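
# Example of calling the handler directly (hypothetical quick check, assuming a file
# named "blueprint.pdf" exists in the working directory):
#     print(predict_image("blueprint.pdf"))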

# Set up Gradio interface
interface = gr.Interface(
    fn=predict_image,
    inputs=gr.File(label="Upload Blueprint (PDF or Image)"),
    outputs=gr.JSON(label="Material Estimates"),
    title="Blueprint Material Estimator",
    description="Upload a blueprint image or PDF to estimate construction materials."
)

# Launch the interface
if __name__ == "__main__":
    interface.launch(share=False)