import numpy as np
from PIL import Image, ImageDraw, ImageFont
import base64
import io
from deepfurniture import DeepFurnitureDataset
from pycocotools import mask as mask_utils

def save_image_base64(image):
    """Convert PIL image to base64 string."""
    buffered = io.BytesIO()
    image.save(buffered, format="JPEG", quality=90)
    return base64.b64encode(buffered.getvalue()).decode()
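
# Note: the base64 string returned above is intended to be embedded directly
# in an HTML data URI (see visualize_html below), e.g.
#   f'<img src="data:image/jpeg;base64,{save_image_base64(img)}">'
# (img here is an illustrative variable), which keeps the generated page
# fully self-contained with no external image files.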


def create_instance_visualization(scene_data):
    """Create combined instance visualization with both masks and bboxes."""
    image = scene_data['image']
    instances = scene_data['instances']
    
    # Image dimensions for boundary checking
    img_width, img_height = image.size
    
    # Start with image at half opacity
    vis_img = np.array(image, dtype=np.float32) * 0.5
    
    # Get all segmentations, remembering which instance each one belongs to
    # so mask colors line up with the bbox colors drawn below
    segmentations = []
    seg_instance_idxs = []
    for i, inst in enumerate(instances):
        if inst['segmentation']:
            rle = {
                'counts': inst['segmentation'],
                'size': [img_height, img_width]
            }
            segmentations.append(rle)
            seg_instance_idxs.append(i)
    
    # Create color map for instances with distinct colors
    colors = np.array([
        [0.9, 0.1, 0.1],  # Red
        [0.1, 0.9, 0.1],  # Green
        [0.1, 0.1, 0.9],  # Blue
        [0.9, 0.9, 0.1],  # Yellow
        [0.9, 0.1, 0.9],  # Magenta
        [0.1, 0.9, 0.9],  # Cyan
        [0.9, 0.5, 0.1],  # Orange
        [0.5, 0.9, 0.1],  # Lime
        [0.5, 0.1, 0.9],  # Purple
    ])
    colors = np.tile(colors, (len(instances) // len(colors) + 1, 1))[:len(instances)]
    
    # Draw instance masks with higher opacity
    if segmentations:
        if isinstance(segmentations[0]['counts'], (list, tuple)):
            segmentations = mask_utils.frPyObjects(
                segmentations, img_height, img_width
            )
        masks = mask_utils.decode(segmentations)
        img_arr = np.array(image, dtype=np.float32)

        for idx in range(masks.shape[2]):
            color = colors[seg_instance_idxs[idx]]
            mask = masks[:, :, idx]
            for c in range(3):
                vis_img[:, :, c] += mask * img_arr[:, :, c] * 0.7 * color[c]
    
    # Convert to PIL for drawing bounding boxes
    vis_img = Image.fromarray(np.uint8(np.clip(vis_img, 0, 255)))
    draw = ImageDraw.Draw(vis_img)
    
    # Try to load a font for better text rendering
    try:
        font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 20)
    except OSError:
        try:
            font = ImageFont.truetype("/System/Library/Fonts/Helvetica.ttc", 20)
        except OSError:
            font = ImageFont.load_default()
    
    # Constants for text and box drawing
    text_padding = 4
    text_height = 24
    text_width = 200
    corner_length = 20
    
    # Draw bounding boxes with labels
    for idx, (instance, color) in enumerate(zip(instances, colors)):
        bbox = instance['bounding_box']
        color_tuple = tuple(int(c * 255) for c in color)
        
        # Calculate label
        furniture_id = instance['identity_id']
        category = instance['category_name']
        label = f"{category} ({furniture_id})"
        
        # Draw bbox with double lines for better visibility
        for offset in [2, 1]:
            draw.rectangle([
                max(0, bbox['xmin'] - offset),
                max(0, bbox['ymin'] - offset),
                min(img_width - 1, bbox['xmax'] + offset),
                min(img_height - 1, bbox['ymax'] + offset)
            ], outline=color_tuple, width=2)
        
        # Determine text position (handle boundary cases)
        # First try above the bbox
        text_y = bbox['ymin'] - text_height - text_padding
        if text_y < 0:  # If no space above, try below
            text_y = bbox['ymax'] + text_padding
            
        # Handle x position
        text_x = bbox['xmin']
        # If text would go beyond right edge, align to right edge
        if text_x + text_width > img_width:
            text_x = max(0, img_width - text_width)
        
        # Draw background for text
        text_pos = (text_x, text_y)
        draw.rectangle([
            text_pos[0] - 2,
            text_pos[1] - 2,
            min(img_width - 1, text_pos[0] + text_width),
            min(img_height - 1, text_pos[1] + text_height)
        ], fill='black')
        
        # Draw text
        draw.text(text_pos, label, fill=color_tuple, font=font)
        
        # Add corner markers with boundary checking
        corners = [
            (bbox['xmin'], bbox['ymin']),  # Top-left
            (bbox['xmax'], bbox['ymin']),  # Top-right
            (bbox['xmin'], bbox['ymax']),  # Bottom-left
            (bbox['xmax'], bbox['ymax'])   # Bottom-right
        ]
        
        for x, y in corners:
            # Ensure corner markers stay within image bounds
            # Horizontal lines
            x1 = max(0, x - corner_length)
            x2 = min(img_width - 1, x + corner_length)
            draw.line([(x1, y), (x2, y)], fill=color_tuple, width=3)
            
            # Vertical lines
            y1 = max(0, y - corner_length)
            y2 = min(img_height - 1, y + corner_length)
            draw.line([(x, y1), (x, y2)], fill=color_tuple, width=3)
    
    return vis_img
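
# Minimal sketch of the pycocotools RLE round trip used above, run on a tiny
# synthetic mask rather than dataset data (purely illustrative; the helper
# name and mask shape are arbitrary). mask_utils.encode expects a
# Fortran-ordered uint8 array and returns a compressed RLE dict that
# mask_utils.decode turns back into the original binary mask.
def _rle_roundtrip_demo(height=4, width=6):
    """Encode a tiny synthetic binary mask to RLE and decode it back."""
    mask = np.zeros((height, width), dtype=np.uint8)
    mask[1:3, 2:5] = 1  # small rectangular blob
    rle = mask_utils.encode(np.asfortranarray(mask))
    decoded = mask_utils.decode(rle)
    assert (decoded == mask).all()
    return rle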


def process_depth_map(depth_image):
    """Process depth map for better visualization.
    
    Args:
        depth_image: PIL Image of depth map
    Returns:
        Processed depth map as PIL Image
    """
    # Convert to numpy array
    depth = np.array(depth_image)
    
    # Normalize depth to 0-1 range
    if depth.max() > depth.min():
        depth = (depth - depth.min()) / (depth.max() - depth.min())
    
    # Apply a simple hand-rolled RGB colormap (not a true viridis)
    colored_depth = np.zeros((*depth.shape, 3))
    colored_depth[..., 0] = (1 - depth) * 0.4  # Red channel
    colored_depth[..., 1] = np.abs(depth - 0.5) * 0.8  # Green channel
    colored_depth[..., 2] = depth * 0.8  # Blue channel
    
    # Convert to uint8 and then to PIL
    colored_depth = (colored_depth * 255).astype(np.uint8)
    return Image.fromarray(colored_depth)
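
# Quick sanity check for process_depth_map: colorize a synthetic horizontal
# gradient instead of a real depth image so the mapping can be inspected in
# isolation (the helper name and sizes are arbitrary, for illustration only).
def _depth_colormap_demo(width=64, height=32):
    """Run process_depth_map on a left-to-right gradient and return the result."""
    gradient = np.tile(np.linspace(0, 255, width, dtype=np.uint8), (height, 1))
    return process_depth_map(Image.fromarray(gradient, mode='L'))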


def visualize_html(dataset, scene_idx, output_path='scene.html'):
    """Generate HTML visualization for a scene."""
    scene_data = dataset[scene_idx]
    
    # Create visualizations
    instance_vis = create_instance_visualization(scene_data)
    
    depth_vis = None
    if scene_data['depth'] is not None:
        depth_vis = process_depth_map(scene_data['depth'])

    # Get base64 encoded images
    scene_img = save_image_base64(scene_data['image'])
    instance_vis = save_image_base64(instance_vis)
    depth_img = save_image_base64(depth_vis) if depth_vis is not None else None
    
    # Create HTML with minimal CSS
    html = f'''
    <html>
    <head>
        <style>
            body {{ font-family: Arial; max-width: 2000px; margin: 0 auto; padding: 20px; }}
            .grid {{ display: grid; grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); gap: 20px; }}
            .main-images {{ 
                grid-template-columns: repeat(auto-fit, minmax(800px, 1fr)); 
                margin: 20px 0;
            }}
            .card {{ 
                border: 1px solid #ddd; 
                padding: 15px; 
                border-radius: 8px;
                box-shadow: 0 2px 4px rgba(0,0,0,0.1);
            }}
            .card h3 {{ 
                font-size: 20px;
                margin-bottom: 15px;
            }}
            img {{ max-width: 100%; height: auto; }}
            h1 {{ 
                color: #333; 
                font-size: 32px;
                text-align: center;
                margin: 30px 0;
            }}
            h2 {{ 
                color: #333; 
                font-size: 28px;
                margin: 25px 0;
            }}
            .instance-info {{ 
                color: #444; 
                font-size: 16px;
                line-height: 1.4;
            }}
            .main-images img {{
                width: 100%;
                object-fit: contain;
                max-height: 800px;  /* Increased max height */
            }}
        </style>
    </head>
    <body>
        <h1>Scene ID: {scene_data['scene_id']}</h1>
        
        <h2>Scene Visualizations</h2>
        <div class="grid main-images">
            <div class="card">
                <h3>Original Scene</h3>
                <img src="data:image/png;base64,{scene_img}">
            </div>
            <div class="card">
                <h3>Instance Visualization (Masks + Bboxes)</h3>
                <img src="data:image/png;base64,{instance_vis}">
            </div>
            {f'<div class="card"><h3>Depth Map</h3><img src="data:image/png;base64,{depth_img}"></div>' if depth_img else ''}
        </div>
        
        <h2>Furniture Instances</h2>
        <div class="grid">
    '''
    
    # Add furniture previews
    for instance in scene_data['instances']:
        furniture_id = str(instance['identity_id'])
        if furniture_id in scene_data['furniture_previews']:
            preview = save_image_base64(scene_data['furniture_previews'][furniture_id])
            bbox = instance['bounding_box']
            
            html += f'''
            <div class="card">
                <h3>{instance['category_name']} (ID: {furniture_id})</h3>
                <div class="instance-info">
                    <p>Style: {', '.join(instance['style_names'])}</p>
                    <p>BBox: ({bbox['xmin']}, {bbox['ymin']}, {bbox['xmax']}, {bbox['ymax']})</p>
                </div>
                <img src="data:image/png;base64,{preview}">
            </div>
            '''
    
    html += '''
        </div>
    </body>
    </html>
    '''
    
    with open(output_path, 'w') as f:
        f.write(html)
    print(f"Visualization saved to {output_path}")

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', required=True)
    parser.add_argument('--scene_idx', type=int, required=True)
    parser.add_argument('--output', default='scene.html')
    args = parser.parse_args()

    dataset = DeepFurnitureDataset(args.dataset)
    visualize_html(dataset, args.scene_idx, args.output)
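
# Example invocation (assuming this file is saved as visualize_scene.py; the
# dataset path and scene index below are placeholders):
#
#   python visualize_scene.py --dataset /path/to/deepfurniture --scene_idx 0 --output scene.html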