File size: 3,507 Bytes
90fb606
664f840
 
90fb606
664f840
 
90fb606
 
664f840
 
 
 
 
 
9f0c3eb
6b2ad08
 
 
 
 
 
 
 
 
fe97c20
 
6b2ad08
 
9f0c3eb
6b2ad08
 
9f0c3eb
 
6b2ad08
 
 
9f0c3eb
 
6b2ad08
9f0c3eb
 
 
6b2ad08
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f0c3eb
664f840
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f0c3eb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import json
import numpy as np
import torch


def convert_to_braille_unicode(str_input: str, path: str = "./braille_map.json") -> str:
    """Map a detection label to its Braille Unicode character.

    Args:
        str_input: Label/key to look up in the JSON mapping file.
        path: Path to a JSON file mapping labels to Braille characters.

    Returns:
        The mapped Braille character, or ``str_input`` unchanged when the
        key is absent from the mapping.
    """
    with open(path, "r", encoding="utf-8") as fl:
        data = json.load(fl)

    # dict.get avoids the UnboundLocalError the previous version raised on a
    # missing key; falling back to the raw input keeps the pipeline running.
    return data.get(str_input, str_input)


def braille_to_text(braille_unicode: str) -> str:
    """Decode a Grade 1 Braille Unicode string into plain text.

    Handles letters, digits (after the number indicator), the capital
    indicator, and a small set of punctuation. Any unrecognized cell
    decodes to '?'.

    Args:
        braille_unicode: String of Braille pattern characters (U+2800 block).

    Returns:
        The decoded plain-text string.
    """
    # One-to-one Grade 1 cells: letters, blank, punctuation.
    letter_cells = {
        '⠁':'a','⠃':'b','⠉':'c','⠙':'d','⠑':'e',
        '⠋':'f','⠛':'g','⠓':'h','⠊':'i','⠚':'j',
        '⠅':'k','⠇':'l','⠍':'m','⠝':'n','⠕':'o',
        '⠏':'p','⠟':'q','⠗':'r','⠎':'s','⠞':'t',
        '⠥':'u','⠧':'v','⠺':'w','⠭':'x','⠽':'y','⠵':'z',
        '⠀':' ',  # blank
        '⠲':'.','⠂':',','⠖':';','⠒':':','⠦':'?','⠄':"'",'⠤':'-','⠌':'/','⠣':'(','⠜':')'
    }
    # The a–j cells double as digits 1–0 while in number mode.
    digit_cells = {
        '⠁':'1','⠃':'2','⠉':'3','⠙':'4','⠑':'5',
        '⠋':'6','⠛':'7','⠓':'8','⠊':'9','⠚':'0'
    }

    decoded = []
    in_number = False
    cap_pending = False

    for cell in braille_unicode:
        if cell == '⠼':  # number indicator: subsequent cells read as digits
            in_number = True
        elif cell == '⠠':  # capital indicator: uppercase the next letter
            cap_pending = True
        else:
            if in_number and cell in digit_cells:
                decoded.append(digit_cells[cell])
                continue
            # Any non-digit cell ends number mode.
            in_number = False
            glyph = letter_cells.get(cell, '?')
            if cap_pending and glyph.isalpha():
                glyph = glyph.upper()
                cap_pending = False
            decoded.append(glyph)

    return ''.join(decoded)

def parse_xywh_and_class(boxes: torch.Tensor) -> list:
    """
    boxes input tensor
        boxes (torch.Tensor) or (numpy.ndarray): A tensor or numpy array containing the detection boxes,
            with shape (num_boxes, 6).
        orig_shape (torch.Tensor) or (numpy.ndarray): Original image size, in the format (height, width).
    Properties:
        xyxy (torch.Tensor) or (numpy.ndarray): The boxes in xyxy format.
        conf (torch.Tensor) or (numpy.ndarray): The confidence values of the boxes.
        cls (torch.Tensor) or (numpy.ndarray): The class values of the boxes.
        xywh (torch.Tensor) or (numpy.ndarray): The boxes in xywh format.
        xyxyn (torch.Tensor) or (numpy.ndarray): The boxes in xyxy format normalized by original image size.
        xywhn (torch.Tensor) or (numpy.ndarray): The boxes in xywh format normalized by original image size.
    """

    # copy values from troublesome "boxes" object to numpy array
    new_boxes = np.zeros(boxes.shape)
    new_boxes[:, :4] = boxes.xywh.numpy()  # first 4 channels are xywh
    new_boxes[:, 4] = boxes.conf.numpy()  # 5th channel is confidence
    new_boxes[:, 5] = boxes.cls.numpy()  # 6th channel is class which is last channel

    # sort according to y coordinate
    new_boxes = new_boxes[new_boxes[:, 1].argsort()]

    # find threshold index to break the line
    y_threshold = np.mean(new_boxes[:, 3]) // 2
    boxes_diff = np.diff(new_boxes[:, 1])
    threshold_index = np.where(boxes_diff > y_threshold)[0]

    # cluster according to threshold_index
    boxes_clustered = np.split(new_boxes, threshold_index + 1)
    boxes_return = []
    for cluster in boxes_clustered:
        # sort according to x coordinate
        cluster = cluster[cluster[:, 0].argsort()]
        boxes_return.append(cluster)

    return boxes_return