import os
from typing import List, Optional, Callable
from functools import partial

import numpy as np
import torch
from scipy.interpolate import UnivariateSpline
from scipy.spatial import KDTree  # used by get_closest()


def smooth_3d_array(points, num=None, **kwargs):
    """
    Fit a smoothing spline to each coordinate of an [N, 3] polyline and
    resample it at `num` evenly spaced parameter values (default: N).
    Extra kwargs (e.g. the smoothing factor `s`) are forwarded to scipy's
    UnivariateSpline.
    """
    x, y, z = points[:, 0], points[:, 1], points[:, 2]
    if num is None:
        num = len(x)
    w = np.arange(0, len(x), 1)
    sx = UnivariateSpline(w, x, **kwargs)
    sy = UnivariateSpline(w, y, **kwargs)
    sz = UnivariateSpline(w, z, **kwargs)
    # resample within the fitted parameter range [0, len(x) - 1]
    wnew = np.linspace(0, len(x) - 1, num)
    smoothed = np.zeros((num, 3))
    smoothed[:, 0] = sx(wnew)
    smoothed[:, 1] = sy(wnew)
    smoothed[:, 2] = sz(wnew)
    return smoothed
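

# Hedged usage sketch (added for illustration, not part of the original module):
# smooth a noisy helix and resample it more densely. The smoothing factor `s`
# is forwarded to scipy's UnivariateSpline.
def _example_smooth_3d_array():
    t = np.linspace(0, 4 * np.pi, 50)
    noisy = np.column_stack((np.cos(t), np.sin(t), t)) + 0.05 * np.random.randn(50, 3)
    smoothed = smooth_3d_array(noisy, num=500, s=0.5)
    assert smoothed.shape == (500, 3)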


def calculate_tnb_frame(curve, epsilon=1e-8):
    curve = np.asarray(curve)

    # Calculate T (tangent)
    T = np.gradient(curve, axis=0)
    T_norms = np.linalg.norm(T, axis=1)
    T = T / T_norms[:, np.newaxis]

    # Identify straight segments
    is_straight = T_norms < epsilon

    # Calculate N (normal) for non-straight parts
    dT = np.gradient(T, axis=0)
    N = dT - np.sum(dT * T, axis=1)[:, np.newaxis] * T
    N_norms = np.linalg.norm(N, axis=1)

    # Handle points where the normal is undefined or in straight segments
    undefined_N = (N_norms < epsilon) | is_straight

    if np.all(undefined_N):
        # The entire curve is straight: pick an arbitrary normal in the
        # xy-plane (assumes T is not parallel to the z-axis)
        N = np.zeros_like(T)
        N[:, 0] = T[:, 1]
        N[:, 1] = -T[:, 0]
        N = N / np.linalg.norm(N, axis=1)[:, np.newaxis]
    elif np.any(undefined_N):
        # Some parts are straight: split the curve into alternating curved and
        # straight segments, then fill in normals for the straight segments by
        # interpolating from the neighbouring curved ones
        segment_changes = np.where(np.diff(undefined_N))[0] + 1
        segments = np.split(np.arange(len(curve)), segment_changes)

        for segment in segments:
            if undefined_N[segment[0]]:
                # This is a straight segment
                left_curved = np.where(~undefined_N[: segment[0]])[0]
                right_curved = (
                    np.where(~undefined_N[segment[-1] + 1 :])[0] + segment[-1] + 1
                )

                if len(left_curved) > 0 and len(right_curved) > 0:
                    # Interpolate between left and right curved parts
                    left_N = N[left_curved[-1]]
                    right_N = N[right_curved[0]]
                    t = np.linspace(0, 1, len(segment))
                    N[segment] = (1 - t[:, np.newaxis]) * left_N + t[
                        :, np.newaxis
                    ] * right_N
                elif len(left_curved) > 0:
                    # Use normal from left curved part
                    N[segment] = N[left_curved[-1]]
                elif len(right_curved) > 0:
                    # Use normal from right curved part
                    N[segment] = N[right_curved[0]]
                else:
                    # No curved parts found, use arbitrary normal
                    N[segment] = np.array([T[segment[0]][1], -T[segment[0]][0], 0])

                # Ensure N is perpendicular to T
                N[segment] = (
                    N[segment]
                    - np.sum(N[segment] * T[segment], axis=1)[:, np.newaxis]
                    * T[segment]
                )
                N[segment] = (
                    N[segment] / np.linalg.norm(N[segment], axis=1)[:, np.newaxis]
                )
    # Otherwise (no straight parts) N is already defined at every point

    # Calculate B (binormal) ensuring orthogonality
    B = np.cross(T, N)

    # Ensure perfect orthogonality through Gram-Schmidt
    N = N - np.sum(N * T, axis=1)[:, np.newaxis] * T
    N = N / np.linalg.norm(N, axis=1)[:, np.newaxis]

    B = B - np.sum(B * T, axis=1)[:, np.newaxis] * T
    B = B - np.sum(B * N, axis=1)[:, np.newaxis] * N
    B = B / np.linalg.norm(B, axis=1)[:, np.newaxis]

    return T, N, B
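

# Illustrative check (added sketch): on a circular helix the returned frame
# should be orthonormal at every point.
def _example_calculate_tnb_frame():
    t = np.linspace(0, 4 * np.pi, 200)
    helix = np.column_stack((np.cos(t), np.sin(t), 0.5 * t))
    T, N, B = calculate_tnb_frame(helix)
    assert np.allclose(np.einsum("ij,ij->i", T, N), 0, atol=1e-6)  # T perpendicular to N
    assert np.allclose(np.linalg.norm(B, axis=1), 1, atol=1e-6)  # B is unit length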


def get_closest(pc_a, pc_b):
    """
    For each point in pc_a, find the closest point in pc_b
    Returns the distance and index of the closest point in pc_b for each point in pc_a
    Parameters
    ----------
    pc_a : [Mx3]
    pc_b : [Nx3]
    """
    tree = KDTree(pc_b)
    dist, idx = tree.query(pc_a, workers=-1)

    if np.max(idx) >= pc_b.shape[0]:
        raise ValueError("idx is out of range")

    return dist, idx
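

# Minimal sketch (illustration only): nearest-neighbour lookup between two
# small point clouds.
def _example_get_closest():
    pc_a = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
    pc_b = np.array([[0.1, 0.0, 0.0], [2.0, 2.0, 2.0]])
    dist, idx = get_closest(pc_a, pc_b)
    assert idx[0] == 0 and np.isclose(dist[0], 0.1)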


def straighten_using_frenet(helix, points):
    """
    Straighten the structure based on the helix (skeleton) using the Frenet frame.

    Args:
    - helix (numpy array): Points forming the helix (skeleton).
    - points (numpy array): Points surrounding the helix.

    Returns:
    - straightened_helix (numpy array): Straightened version of the helix.
    - straightened_points (numpy array): Transformed surrounding points.
    """
    # Compute the Frenet frame for the helix
    T, N, B = calculate_tnb_frame(helix)

    # Parameterize the helix based on cumulative distance (arclength)
    deltas = np.diff(helix, axis=0)
    distances = np.linalg.norm(deltas, axis=1)
    cumulative_distances = np.insert(np.cumsum(distances), 0, 0)

    # Map helix to a straight line along Z-axis
    straightened_helix = np.column_stack(
        (
            np.zeros_like(cumulative_distances),
            np.zeros_like(cumulative_distances),
            cumulative_distances,
        )
    )

    # Express each surrounding point in the local (T, N, B) frame of its
    # closest skeleton point, using spherical coordinates (r, theta, phi)
    distances_to_helix, closest_idxs = get_closest(points, helix)
    vectors = points - helix[closest_idxs]
    r = distances_to_helix
    T_closest = T[closest_idxs]
    N_closest = N[closest_idxs]
    B_closest = B[closest_idxs]
    theta = np.arctan2(
        np.einsum("ij,ij->i", vectors, N_closest),
        np.einsum("ij,ij->i", vectors, B_closest),
    )
    # Guard against division by zero for points lying on the skeleton, and clip
    # to arccos's domain to absorb floating-point error
    cos_phi = np.einsum("ij,ij->i", vectors, T_closest) / np.maximum(r, 1e-12)
    phi = np.arccos(np.clip(cos_phi, -1.0, 1.0))
    x = r * np.sin(phi) * np.cos(theta)
    y = r * np.sin(phi) * np.sin(theta)
    z = cumulative_distances[closest_idxs] + r * np.cos(phi)
    straightened_points = np.column_stack((x, y, z))

    return straightened_helix, straightened_points
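

# Hedged sketch (illustration): straightening maps the skeleton itself onto the
# z-axis, parameterized by cumulative arclength.
def _example_straighten_using_frenet():
    t = np.linspace(0, 4 * np.pi, 300)
    helix = np.column_stack((np.cos(t), np.sin(t), 0.5 * t))
    nearby = helix + 0.05  # stand-in for points surrounding the skeleton
    straight_helix, straight_points = straighten_using_frenet(helix, nearby)
    assert np.allclose(straight_helix[:, :2], 0)  # x = y = 0 along the skeleton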


def frenet_transformation(pc, skel, lb):
    # Densify and smooth the skeleton before computing the Frenet frame;
    # s is the UnivariateSpline smoothing factor
    skel_smooth = smooth_3d_array(skel, num=skel.shape[0] * 100, s=200000)
    skel_trans, pc_trans = straighten_using_frenet(skel_smooth, pc)
    return pc_trans, skel_trans, lb


def transformation(trunk_id, pc, trunk_pc, label, frenet: bool):
    """
    Optionally apply the Frenet transformation, then normalize the point
    cloud to the unit sphere.

    Parameters
    ----------
    trunk_id : int
    pc : [N, 3] point cloud
    trunk_pc : [M, 3] skeleton point cloud
    label : [N] per-point labels
    frenet : whether to apply the Frenet transformation
    """

    unmodified_pc = pc.copy()
    if frenet:
        pc, trunk_pc, label = frenet_transformation(pc, trunk_pc, label)

    # NOTE: trunk_pc has variable length and cannot be collated using default_collate
    # normalize [N, 3] to unit sphere
    pc = pc - np.mean(pc, axis=0)
    m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
    pc = pc / m

    # cast to int
    label = label.astype(int)

    return trunk_id, pc, label, unmodified_pc
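

# Illustrative sketch: with frenet=False, transformation() just centers the
# cloud and scales it into the unit sphere.
def _example_transformation():
    pc = np.random.randn(512, 3) * 5 + 10.0
    label = np.zeros(512)
    _, pc_out, _, original = transformation(0, pc, pc.copy(), label, frenet=False)
    assert np.isclose(np.max(np.linalg.norm(pc_out, axis=1)), 1.0)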


class CachedDataset:
    def __init__(
        self,
        output_path: str,
        num_points: int,
        folds: List[List[int]],
        fold: int,
        is_train: bool,
        transform: Optional[Callable] = None,
    ):
        self.num_points = num_points
        self.transform = transform
        self.spanning_paths = np.load(
            os.path.join(output_path, "spanning_paths.npz"), allow_pickle=True
        )["spanning_paths"].item()

        if fold == -1:
            print("Loading all folds, ignoring is_train")
            trunk_ids = self.spanning_paths.keys()
        else:
            if is_train:
                trunk_ids = [
                    item
                    for idx, sublist in enumerate(folds)
                    if idx != fold
                    for item in sublist
                ]
            else:
                trunk_ids = folds[fold]
        self.trunk_ids = sorted(trunk_ids)

        files = []
        i = 0
        for trunk_id in sorted(self.spanning_paths.keys()):
            for _ in self.spanning_paths[trunk_id]:
                if trunk_id in self.trunk_ids:
                    files.append(os.path.join(output_path, f"{i}.npz"))
                    assert os.path.exists(files[-1])
                i += 1
        self.files = files

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        data = np.load(self.files[idx])
        trunk_id, pc, trunk_pc, label = (
            data["trunk_id"],
            data["pc"],
            data["trunk_pc"],
            data["label"],
        )
        assert trunk_id in self.trunk_ids

        # PC is [N, 3], downsample to [num_points, 3]
        random_permutation = np.random.permutation(pc.shape[0])
        pc = pc[random_permutation[: self.num_points]]
        label = label[random_permutation[: self.num_points]]

        if self.transform is None:
            return trunk_id, pc, trunk_pc, label
        else:
            return self.transform(trunk_id, pc, trunk_pc, label)
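

# Hedged sketch with hypothetical paths and folds: instantiating CachedDataset
# directly, outside get_dataloader(). Assumes a cache directory produced by the
# preprocessing step exists at `output_path`.
def _example_cached_dataset():
    ds = CachedDataset(
        output_path="seg_den_1000000_10000",  # hypothetical cache directory
        num_points=1024,
        folds=[[1, 2], [3, 4]],
        fold=0,
        is_train=True,
        transform=None,
    )
    trunk_id, pc, trunk_pc, label = ds[0]  # pc: [1024, 3], label: [1024]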


def get_dataloader(
    species: str,
    num_points: int,
    fold: int,
    is_train: bool,
    batch_size: int,
    num_workers: int,
    frenet: bool,
    distributed: bool = False,
    collate_fn: Optional[Callable] = None,
    path_length=10000,
):
    """
    Returns FreSeg dataloader for the given species and fold

    Parameters
    ----------
    species: one of ["seg_den", "mouse", "human"]
    num_points: number of points to sample from the point cloud
    fold: -1 to fetch all folds, 0-4 for seg_den
    is_train: bool
    batch_size
    num_workers
    frenet: whether to apply the Frenet transformation
    distributed: bool
    collate_fn
    path_length : skeleton path length; part of the cache directory name, so it
        must match the value used when the cache was generated
    """

    assert species in ["seg_den", "mouse", "human"]
    seg_den_folds = [
        [3, 5, 11, 12, 23, 28, 29, 32, 39, 42],
        [8, 15, 19, 27, 30, 34, 35, 36, 46, 49],
        [9, 14, 16, 17, 21, 26, 31, 33, 43, 44],
        [2, 6, 7, 13, 18, 24, 25, 38, 41, 50],
        [1, 4, 10, 20, 22, 37, 40, 45, 47, 48],
    ]

    if species != "seg_den":
        assert (
            fold == -1
        ), "fold must be -1 for mouse and human datasets, which have no fold splits"

    dataset = CachedDataset(
        f"{species}_1000000_{path_length}",
        num_points=num_points,
        folds=seg_den_folds if species == "seg_den" else [],
        fold=fold,
        is_train=is_train,
        transform=partial(transformation, frenet=frenet),
    )

    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=is_train and not distributed,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=is_train,
        sampler=torch.utils.data.DistributedSampler(dataset) if distributed else None,
        collate_fn=collate_fn,
    )

    return dataloader, dataset.files


if __name__ == "__main__":
    human_loader, _ = get_dataloader(
        species="human",
        num_points=1024,
        fold=-1,
        is_train=True,
        batch_size=32,
        num_workers=8,
        frenet=False,
    )
    for i, data in enumerate(human_loader):
        trunk_id, pc, label, original_pc = data
        """
        trunk_id: array of trunk ids of length batch_size
        pc: point cloud in isotropic coordinates, modified using transformation(), shape [batch, num_points, 3]
        label: corresponding value of seg volume at that point, shape [batch, num_points]
            will be 0 if part of trunk, unique spine segment id otherwise
        original_pc: point cloud in isotropic coordinates, unmodified, shape [batch, num_points, 3]
        """
        pass