Kushalmanda committed
Commit 0ea5e5f · verified · 1 Parent(s): c7937b2

Update app.py

Files changed (1)
  1. app.py +43 -42
app.py CHANGED
@@ -1,12 +1,14 @@
  import torch
- import gradio as gr
+ from transformers import AutoFeatureExtractor, AutoModelForObjectDetection
  from PIL import Image
+ import gradio as gr
  import numpy as np

- # Load the YOLOv5 model (adjust the path to your model if needed)
- model = torch.hub.load('ultralytics/yolov5', 'custom', path='yolov5s.pt')  # Adjust if needed
+ # Load the pre-trained DETR model
+ model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50")
+ extractor = AutoFeatureExtractor.from_pretrained("facebook/detr-resnet-50")

- # Define the function to calculate materials based on detected areas
+ # Function to calculate materials based on detected areas (example: walls and foundations)
  def calculate_materials(detected_objects, image_width, image_height):
      materials = {
          "cement": 0,
@@ -14,63 +16,62 @@ def calculate_materials(detected_objects, image_width, image_height):
          "steel": 0
      }

-     # Proportionality factors (simplified for this example, adjust based on real-world data)
+     # Proportionality factors (simplified, adjust based on real-world data)
      for obj in detected_objects:
-         # Get the bounding box coordinates
+         # Bounding box coordinates in the format [xmin, ymin, xmax, ymax]
          x1, y1, x2, y2 = obj['bbox']
-
-         # Convert the bounding box coordinates to real-world units based on image size and blueprint size
-         width = (x2 - x1) * image_width  # Convert to real-world width (in cm or meters)
-         height = (y2 - y1) * image_height  # Convert to real-world height (in cm or meters)

-         # Calculate the area (length × width) in real-world units
-         area = width * height  # cm² or based on the scale
-
-         # Debugging output to verify bounding box size
-         print(f"Detected {obj['name']} with area {area} cm²")  # Adjust units based on the scale
+         # Calculate real-world dimensions (assuming a known scale for the blueprint)
+         width = (x2 - x1) * image_width  # Convert to real-world width
+         height = (y2 - y1) * image_height  # Convert to real-world height
+
+         # Calculate area
+         area = width * height  # cm² or m² depending on the scale

-         # Example material estimation based on detected object type
+         # Print area for debugging
+         print(f"Detected {obj['name']} with area {area} cm²")
+
+         # Material estimation based on the object name
          if obj['name'] == 'wall':  # Example: for 'wall' objects
              materials['cement'] += area * 0.1  # Cement estimation (in kg)
              materials['bricks'] += area * 10  # Bricks estimation
              materials['steel'] += area * 0.05  # Steel estimation

          elif obj['name'] == 'foundation':  # Example: for 'foundation' objects
-             materials['cement'] += area * 0.2  # More cement for foundation
-             materials['bricks'] += area * 15  # More bricks for foundation
-             materials['steel'] += area * 0.1  # More steel for foundation
+             materials['cement'] += area * 0.2
+             materials['bricks'] += area * 15
+             materials['steel'] += area * 0.1

      return materials

  # Define the function for image inference
  def predict_image(image):
-     # Run inference on the input image
-     results = model(image)
-
-     # Get the detected objects as a pandas dataframe
-     detected_objects = results.pandas().xywh[0]  # First image in batch
-
-     # Debugging output: print the detected objects for inspection
-     print(f"Detected objects: {detected_objects}")
-
-     # Set the confidence threshold (e.g., 0.5 means 50% confidence)
-     confidence_threshold = 0.5
-     detected_objects = detected_objects[detected_objects['confidence'] > confidence_threshold]
+     # Convert the image to the required format for the model
+     inputs = extractor(images=image, return_tensors="pt")
+
+     # Run inference with DETR
+     outputs = model(**inputs)

-     # Assume blueprint image size (in cm, adjust based on real-world image size)
-     image_width = 91  # Example width in cm (adjust to the real-world blueprint size)
-     image_height = 61  # Example height in cm (adjust to the real-world blueprint size)
+     # Get the predictions from the output (boxes and labels)
+     target_sizes = torch.tensor([image.size[::-1]])  # height, width
+     results = extractor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.5)[0]
+
+     detected_objects = []

-     # Process the detected objects and calculate materials
-     detected_objects_list = []
-     for _, row in detected_objects.iterrows():
-         detected_objects_list.append({
-             'name': row['name'],  # Detected object class name (e.g., 'wall', 'foundation')
-             'bbox': [row['xmin'], row['ymin'], row['xmax'], row['ymax']]  # Bounding box coordinates
+     # Process the detected objects and extract bounding boxes and class names
+     for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+         box = box.tolist()  # Convert box tensor to a plain list
+         detected_objects.append({
+             'name': model.config.id2label[label.item()],  # Map label id to class name
+             'bbox': box  # Bounding box [xmin, ymin, xmax, ymax]
          })
+
+     # Use the image dimensions as the scale (adjust based on the blueprint's real size)
+     image_width = image.size[0]   # Image width in pixels
+     image_height = image.size[1]  # Image height in pixels

-     # Calculate materials based on detected objects
-     materials = calculate_materials(detected_objects_list, image_width, image_height)
+     # Calculate materials based on the detected objects
+     materials = calculate_materials(detected_objects, image_width, image_height)

      # Return the materials as a dictionary
      return materials
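As a sanity check on the estimation logic, here is a minimal sketch (not part of the commit) that exercises calculate_materials with one fabricated detection, assuming normalized 0–1 box coordinates and the 91 cm × 61 cm blueprint scale from the removed YOLOv5 path:

# Illustrative only: a 'wall' spanning 40% × 20% of a 91 cm × 61 cm blueprint.
sample_objects = [{'name': 'wall', 'bbox': [0.1, 0.1, 0.5, 0.3]}]
# width  = (0.5 - 0.1) * 91 = 36.4 cm
# height = (0.3 - 0.1) * 61 = 12.2 cm
# area   = 36.4 * 12.2 ≈ 444.1 cm²
print(calculate_materials(sample_objects, 91, 61))
# ≈ {'cement': 44.4, 'bricks': 4440.8, 'steel': 22.2}

Note that the stock facebook/detr-resnet-50 checkpoint is trained on COCO, whose label set includes neither 'wall' nor 'foundation', so the estimates stay at zero unless a checkpoint fine-tuned on blueprint classes is substituted.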
 
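Both versions import gradio, but the gr.Interface wiring sits outside the changed hunks. A minimal sketch of how predict_image might be exposed, assuming a PIL image input and JSON output (these component choices are assumptions, not taken from the commit):

# Hypothetical wiring, not shown in this diff: serve predict_image as a Gradio app.
demo = gr.Interface(
    fn=predict_image,                # returns the materials dict
    inputs=gr.Image(type="pil"),     # PIL input, matching the image.size calls above
    outputs=gr.JSON(label="Estimated materials"),
    title="Blueprint Material Estimator",
)

if __name__ == "__main__":
    demo.launch()

With type="pil", Gradio passes predict_image a PIL.Image, so image.size[0] and image.size[1] resolve as expected.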