|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:22:29.171971Z" |
|
}, |
|
"title": "Sign Language Motion Capture Dataset for Data-driven Synthesis", |
|
"authors": [ |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Jedli\u010dka", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NTIS -New Technologies for the Information Society", |
|
"institution": "University of West Bohemia Univerzitn\u00ed 8", |
|
"location": { |
|
"postCode": "306 14", |
|
"settlement": "Pilsen", |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zden\u011bk", |
|
"middle": [], |
|
"last": "Kr\u0148oul", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NTIS -New Technologies for the Information Society", |
|
"institution": "University of West Bohemia Univerzitn\u00ed 8", |
|
"location": { |
|
"postCode": "306 14", |
|
"settlement": "Pilsen", |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Kanis", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "NTIS -New Technologies for the Information Society", |
|
"institution": "University of West Bohemia Univerzitn\u00ed 8", |
|
"location": { |
|
"postCode": "306 14", |
|
"settlement": "Pilsen", |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper presents a new 3D motion capture dataset of Czech Sign Language (CSE). Its main purpose is to provide the data for further analysis and data-based automatic synthesis of CSE utterances. The content of the data in the given limited domain of weather forecasts was carefully selected by the CSE linguists to provide the necessary utterances needed to produce any new weather forecast. The dataset was recorded using the state-of-the-art motion capture (MoCap) technology to provide the most precise trajectories of the motion. In general, MoCap is a device capable of accurate recording of motion directly in 3D space. The data contains trajectories of body, arms, hands and face markers recorded at once to provide consistent data without the need for the time alignment.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper presents a new 3D motion capture dataset of Czech Sign Language (CSE). Its main purpose is to provide the data for further analysis and data-based automatic synthesis of CSE utterances. The content of the data in the given limited domain of weather forecasts was carefully selected by the CSE linguists to provide the necessary utterances needed to produce any new weather forecast. The dataset was recorded using the state-of-the-art motion capture (MoCap) technology to provide the most precise trajectories of the motion. In general, MoCap is a device capable of accurate recording of motion directly in 3D space. The data contains trajectories of body, arms, hands and face markers recorded at once to provide consistent data without the need for the time alignment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Sign language (SL) is a way of communication that utilizes the movement of a human body. It uses manual, facial, and other body movements to express information. SL is a basic communication system of deaf people and it is often their natural way of communication. According to (Naert et al., 2017) , deaf people are often facing problem using written language (based on the spoken language), because it uses the different grammatical rules, and the nature and the spatial organization of linguistic concepts as well. However, most information in the media or the Internet is available in the spoken or the written form. Thus it leads to difficulties for deaf people to access the information. Computer animation techniques have experienced great improvement recently. There have been developed devices dedicated to the recording of a movement in high precision in 3D space. Animations computed from the data recorded in this way are of high quality and accurate, and their usage is increasingly common outside the film and the computer game industry. An artificial avatar is one possible output of such animation. In public television as an example, they use translation made by a signer which is shown in a window added into the screen. However, the avatar technology is more flexible compared to the real SL signer. It has editable content that can be produced more easily than video (no recording studio with camera) and which also preserves the anonymity of the signer. Using an animated artificial avatar with automatic SL synthesis seems to be a good way to improve the actual way of using CSE on TV. Recently, some approaches based on key-frame techniques and procedural synthesis have been developed. These approaches provide fine control over the movements of the avatar. These avatars are however poorly accepted by the deaf community because of their lack of human-like motion. There are some works that aim to deal with this problem. In (McDonald et al., 2016) for example, authors added noise measured from MoCap data to the rule-based synthesis to improve the performance of the avatar. Data-driven synthesis, on the other hand, preserves the motion of an original SL signer. In this paper, we introduce, by our best knowledge, the first MoCap dataset of CSE. This dataset consists of both dictionary items and continuous signing. Manual and nonmanual components were recorded simultaneously and the setup includes a high number of markers placed on the face, the body and fingers in order to provide precise and synchronous data. As the main purpose of creating this dataset is to develop an automatic SL synthesis, we also suggest the methods for evaluating the synthesized data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 277, |
|
"end": 297, |
|
"text": "(Naert et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1949, |
|
"end": 1972, |
|
"text": "(McDonald et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Most SL datasets are recorded by an optical camera as they are the most affordable device for this purpose and the recording setup is fast. The difference in data output from the MoCap system and video output is that the MoCap system provides 3D data directly and therefore can be more precise. Although, there are techniques developed for the pose estimation from the image or video, e.g. OpenPose (Cao et al., 2017) , the 3D precision is in principle lower than the actual 3D pose measuring provided by the MoCap system. Some datasets using different motion capture techniques were created in recent years. (Lu and Huenerfauth, 2010) recorded American SL using magnetic-based motion capture for hand and finger tracking. The evolution of motion capture datasets collected in French SL is described in (Gibet, 2018) . They recorded three MoCap datasets in the last 15 years. All of them contain manual and nonmanual components of SL. The project HuGEx (2005) used Cybergloves for recording finger movements and the Vicon MoCap system for the body and the facial movements. The total recording time was 50 minutes. The next project, SignCom (2011) uses the Vicon MoCap system to record all components and the recording time was 60 minutes, but only 6 markers per hand were used for the hand and finger recording. The most recent project Sign3D (2014) has all components recorded with the Vicon system and the eye gaze was recorded with a head-mounted oculometer (Mo-capLab MLab 50-W). It has 10 minutes of recorded data. There is a continual need for a large amount of data to utilize machine learning techniques. Although the quality and size of datasets are increasing, there is still a lack of such data. The usual size of those datasets is between 10 and 60 minutes of recording time.", |
|
"cite_spans": [ |
|
{ |
|
"start": 399, |
|
"end": 417, |
|
"text": "(Cao et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 609, |
|
"end": 635, |
|
"text": "(Lu and Huenerfauth, 2010)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 803, |
|
"end": 816, |
|
"text": "(Gibet, 2018)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Our aim is to record the SL dataset usable for automatic synthesis and evaluation of new utterances. In order to synthesize any given utterance, the language domain was limited to the terms used in the weather forecast. The weather forecast domain was also selected because of the availability of reference video recordings of daily forecasts in SL from a recent couple of years. The size of the vocabulary is reasonably limited for our purposes. There are some differences in SL expressions depending on the location due to different dialects of CSE, therefore, we used the video source provided by the Czech national television because the used signs are considered as well understandable and recognizable to most of the audience. CSE linguist experts selected 36 weather forecasts broadcasted throughout the year in order to provide different expressions needed for weather forecasts in different seasons to provide all the necessary data for further synthesis of any weather forecast in the future.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Design", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "The Motion capture (MoCap) recording is the process of recording the movements using specialized devices in order to reconstruct motions in the 3D space during the time. There are different approaches for data acquisition using MoCap techniques and there are also devices dedicated to the MoCap recording of different body parts. We did some experimental recordings using a different variation of devices such as Cybergloves2 for finger and VICON Cara for facial recording . The main problem with the usage of such devices was signer's discomfort and limitations to performed movements (e.g. tight gloves reduce free movement of fingers, Cara devices camera placement denies finger-face interactions). Another issue was synchronization and calibration (data alignment in general) of different devices as described in (Huenerfauth et al., 2008) and . Recording all modalities (arm, hand pose, and facial movement) using one device emerged as the best solution. In our solution using an optical-based MoCap system, the signer is equipped with lightweight markers only, and there is no need for merging data together. The only limitation is that the optical-based approach needs a clear line of view from cameras to markers and, therefore, is sensitive to occlusions of body parts. A large number of cameras are needed as well as their precise placement, for such a complex movement like SL utterances.", |
|
"cite_spans": [ |
|
{ |
|
"start": 817, |
|
"end": 843, |
|
"text": "(Huenerfauth et al., 2008)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recording Setup", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "We used the optical-based MoCap system consisting of 18 VICON cameras (8xT-20, 4xT-10, 6xVero) for dataset recording and one RGB camera as referential and two Kinects v2 for additional data acquisition. MoCap recording frequency was 120Hz. The placement of cameras shown in Figure 1 was developed to cover the place in front of the signer in order to avoid occlusions as much as possible and in order to focus on facial expressions. Camera placement was also adjusted for the particular signer to reduce gaps in trajectories caused by occlusions. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 282, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Motion Capture Setup", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "The markers placed on the face and fingers were selected to cause minimal disturbance to the signer. We used different marker sizes and shapes for different body parts (see Table 1 and Figure 2 ). We tracked the upper body and arms by a pair of markers placed on the axis of joints completed by some referential markers. The positions of markers on the face were selected to follow facial muscles and wrinkles. We used 8mm spherical markers around the face, 4 mm hemispherical markers for facial features with the exception of nasolabial folds with 2.5 mm hemispherical markers. The eye gaze and eyelid movement were not tracked by the MoCap device, but it can be obtained from the reference video. Two markers for palm tracking are placed on the index and small finger metacarpals. We tracked fingers using three 4 mm hemispherical markers per finger placed in the middle of each finger phalanx and thumb metacarpals. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 193, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Subject Setup", |
|
"sec_num": "4.2." |
|
}, |
|
{ |
|
"text": "We have recorded approximately 30 minutes of continuous signing (> 200000 frames) and 12 minutes of dictionary items. All data were recorded by one expert CSE signer, who was monitored by another CSE expert during the process. The dataset contains 36 weather forecasts. On average, each such forecast is 30 seconds long and contains 35 glosses. The dictionary contains 318 different glosses. Those dictionary items are single utterances surrounded by the posture with loose hands and arms (a rest pose) in order not to be affected by any context. Dataset processing is a very demanding work both in terms of time and demands for expert annotation and MoCap data postprocessing. MoCap data have to be processed in order to ensure proper labeling of each marker and to fill eventual gaps in marker trajectories. The next step of MoCap data processing is to solve the marker trajectories (Figure fig:MarkerSetup ) to the form of the skeleton model shown in Figure 5 . Solving provides data in the angular domain of each body part. Those data can be used directly for the animation. Another important step in the processing of the dataset is the annotation of content. We used the well-known Elan annotation tool for this purpose, see (Crasborn and Sloetjes, 2008) . The reference video of data was used for the annotation as it provides the possibility to annotate the data without need of rendering the MoCap data but it lacks precision because of lower frame-rate (120 fps MoCap vs. 25 fps video). This annotation was made by the CSE native signer. It contains time stamps dividing the data into different signs, transitions between signs and rest pose in onetier, see Figure 3 . The aim of this annotation is to roughly capture those moments of change and it will be used as initialization for a data-driven segmentation/synthesis process. Although annotation is still in progress, almost 80% is already done. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 1231, |
|
"end": 1260, |
|
"text": "(Crasborn and Sloetjes, 2008)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 885, |
|
"end": 908, |
|
"text": "(Figure fig:MarkerSetup", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 954, |
|
"end": 962, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
}, |
|
{ |
|
"start": 1668, |
|
"end": 1676, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Parameters", |
|
"sec_num": "5." |
|
}, |
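As an illustration of the gap-filling step mentioned above, the following is a minimal sketch assuming marker trajectories are stored as a NumPy array of shape (frames, markers, 3) with NaN entries where a marker was occluded; simple linear interpolation is used here purely for illustration and is not necessarily the method applied to this dataset.

```python
import numpy as np

def fill_gaps(trajectories):
    """Linearly interpolate NaN gaps in marker trajectories.

    trajectories: float array of shape (n_frames, n_markers, 3),
    with NaN where a marker was occluded (assumed layout).
    Returns a copy with gaps filled per marker and coordinate.
    """
    filled = trajectories.copy()
    frames = np.arange(filled.shape[0])
    for m in range(filled.shape[1]):
        for c in range(3):
            channel = filled[:, m, c]
            valid = ~np.isnan(channel)
            if valid.any() and not valid.all():
                filled[:, m, c] = np.interp(frames, frames[valid], channel[valid])
    return filled
```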
|
{ |
|
"text": "The best way and till now mostly used method for expressing the quality or comparing the similarity of two signs is using subjective evaluating by SL native signers. However, this evaluation is both time and human resources demanding process and moreover usually more than one person is needed for the subjectivity of the evaluation, see (Huenerfauth et al., 2008) . The popularity of automatic and machine learning techniques utilization for data-processing related tasks increased in recent years. An objective criterion in the form of a cost function is crucial for such techniques but it is usually not trivial to choose one. The purpose of such a function is not to replace the human evaluation of the synthesis result, but to provide a proper cost function for machine learning techniques as they need fast evaluation during training process. The data provided by the MoCap recording are trajectories of all markers. The advantage of such data is direct information of the positions in the 3D space but the human body topology (skeleton) may not be respected in such representation. On the other hand, angular trajectories of bones are bound to the exact human body topology. The topology of a signer is constant during the time. This can improve the consistency of the data if signs from single signer are compared. In both cases, one frame can be considered as a vector of values and the duration of two similar utterances can differ, although the meaning is the same. The signs and utterances are the time-sequences of these vectors. The usual metrics (among the others) for evaluating difference/similarity between two single vectors p = (p 0 , p 1 , ..., p i , ...p N ) and q = (q 0 , q 1 , ..., q i , ...q N ) of the same length N are: \u2022 Root mean square error (RMSE):", |
|
"cite_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 364, |
|
"text": "(Huenerfauth et al., 2008)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2022 Euclidean distance: d = N i=0 (q i \u2212 p i ) 2 ,", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d = N i=0 (p i \u2212 q i ) 2 N ,", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "\u2022 Correlation coefficients (Corr):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d = N i=0 (p i \u2212 p)(q i \u2212 q) N i=0 (p i \u2212 p) 2 N i=0 (q i \u2212 q) 2 ,", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "where p and q are mean values of p and q respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
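For illustration, the three per-frame metrics above can be written directly in NumPy; this is a sketch assuming each frame is a plain 1-D vector of joint angles or marker coordinates, not code from the paper.

```python
import numpy as np

def euclidean(p, q):
    # Eq. (1): Euclidean distance between two frame vectors.
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    return np.sqrt(np.sum((q - p) ** 2))

def rmse(p, q):
    # Eq. (2): root mean square error between two frame vectors.
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    return np.sqrt(np.mean((p - q) ** 2))

def corr(p, q):
    # Eq. (3): Pearson correlation coefficient of two frame vectors.
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    pc, qc = p - p.mean(), q - q.mean()
    return np.sum(pc * qc) / np.sqrt(np.sum(pc ** 2) * np.sum(qc ** 2))
```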
|
{ |
|
"text": "The time component of the data (the time-sequence of the vectors) can be addressed by the following approaches. One of them is a time alignment in the form of re-sampling the time-sequence of two compared components to the same length and then measure the distance. In (Sedmidubsky et al., 2018) they used normalization for motion data comparison for query purposes in the form of the time axis movement sequence normalization and Euclidean distance for each motion. Dynamic time warping (Berndt and Clifford, 1994 ) (DTW) is commonly used algorithm for the time-series comparison. This method computes the best per frame alignment in terms of the chosen distance. It provides us a possibility to get minimal distance of two time-sequence with different lengths, for example two utterances with different signing pace. The computed DTW distance d D T W is a minimal distance with the optimal time alignment of sequences p and q, path describes the alignment of the vectors:", |
|
"cite_spans": [ |
|
{ |
|
"start": 488, |
|
"end": 514, |
|
"text": "(Berndt and Clifford, 1994", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d DT W , path = DT W (p, q).", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
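A compact dynamic-programming sketch of DTW as used in Eq. (4), assuming the two sequences are arrays of per-frame vectors; it returns both the accumulated distance and the alignment path. This illustrates the general algorithm of (Berndt and Clifford, 1994), not the exact implementation used by the authors.

```python
import numpy as np

def dtw(p, q, dist=lambda a, b: np.linalg.norm(a - b)):
    """Dynamic time warping between sequences p and q of frame vectors.

    Returns (d_dtw, path) as in Eq. (4): the minimal accumulated distance
    and the list of aligned frame-index pairs.
    """
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    n, m = len(p), len(q)
    cost = np.full((n + 1, m + 1), np.inf)
    cost[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            d = dist(p[i - 1], q[j - 1])
            cost[i, j] = d + min(cost[i - 1, j], cost[i, j - 1], cost[i - 1, j - 1])
    # Backtrack to recover the optimal alignment path.
    path, i, j = [], n, m
    while i > 0 and j > 0:
        path.append((i - 1, j - 1))
        step = np.argmin([cost[i - 1, j - 1], cost[i - 1, j], cost[i, j - 1]])
        if step == 0:
            i, j = i - 1, j - 1
        elif step == 1:
            i -= 1
        else:
            j -= 1
    return cost[n, m], path[::-1]
```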
|
{ |
|
"text": "We tested the DTW algorithm with the Euclidean distance (1) for measuring a distance between two different signs and between different instances of the same sign. We limited this test for the signs with meanings \"one\", \"two\", \"three\", \"four\", and \"five\", both from the dictionary and the continuous signing and compared measured distances between signs with the same meaning (different instance of the same sign) and different signs (all instances of other signs from the same test-set). The DTW distance was measured between two signs, the distance was normalized to the vector size and the length of the DTW path, so the distance is independent on the skeleton complexity and duration of the sequence. The normalized DTW d normDT W distance is defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d normDT W = d DT W M \u2022 N ,", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "where M is the length of the path from DTW algorithm and N is the number of channels of the data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Synthesis Evaluation", |
|
"sec_num": "6." |
|
}, |
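Continuing the sketch above, the normalization of Eq. (5) divides the DTW distance by the path length M and the channel count N; the function and variable names here are illustrative only.

```python
def normalized_dtw(p, q):
    # Eq. (5): DTW distance normalized by path length M and channel count N,
    # so values are comparable across skeleton sizes and sign durations.
    d_dtw, path = dtw(p, q)   # dtw() from the sketch above
    M = len(path)             # length of the optimal alignment path
    N = len(p[0])             # number of channels per frame vector
    return d_dtw / (M * N)
```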
|
{ |
|
"text": "distances [deg] (same meaning) (different meaning) \"one\" 0.84 -1.79 2.49 -8.67 \"two\" 0.45 -1.29 2.49 -7.08 \"three\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sign distances [deg]", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1.18 2.54 -5.80 \"four\" 0.33 -0.85 3.24 -8.67 \"five\" 0.33 -0.85 2.49 -7.78 Table 2 : Normalized DTW distances between signs (handshapes only).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 81, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sign distances [deg]", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Euclidean distances of angular trajectories computed using DTW are summarized in Tables 2 and 3 for handshape only and for the whole body (hand included) respectively. The tested signs (numbers from 1 to 5) were chosen because they are very similar and differs only in the handshape. The signs are compared to other instances with the same meaning and to all instances of all different signs (e.g.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 99, |
|
"text": "Tables 2 and 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sign distances [deg]", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "distances [deg] (same meaning) (different meaning) \"one\" 2.30 -3.25 3.08 -6.90 \"two\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sign distances [deg]", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1.04 -3.59 2.74 -6.37 \"three\" 2.58 2.94 -5.42 \"four\" 0.89 -3.28 2.73 -6.90 \"five\" 1.10 -2.07 3.13 -5.57 Table 3 : Normalized DTW distances between signs (whole body without face).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 111, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sign distances [deg]", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "all instances with the meaning \"one\" are compared to all other instances with the same meaning and to all instances with different meanings such as \"two\", \"three\", ...). According to the results in Table 3 , using normalized DTW distance for raw trajectories of the angular representation seems to have the ability to objectively measure the difference between signs, because the distance is generally lower for the signs with the same meaning than others.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 205, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sign distances [deg]", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In case of the hand-shapes (Table 2 , there seems to be the ability to not only measure the distances between signs with the same meanings but also to distinct different signs completely. We suggest some approaches to improve the evaluation of distances calculated by DTW. We can use different weights for the distance measure for different bones based on its corresponding importance for the signs distinction. We can also use trajectories of different body parts to compare signs components separately. For example, compare handshapes, palm orientation and location with their counterparts respectively to enable more precise modeling of SL grammar such as classifiers, the co-occurrence of manual and non-manual, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 35, |
|
"text": "(Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sign distances [deg]", |
|
"sec_num": null |
|
}, |
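As a sketch of the weighting idea suggested above, the per-frame distance inside DTW can weight each channel (bone angle) by its assumed importance for sign distinction; the weight values and the pairing with the dtw() sketch given earlier are placeholders, not taken from the paper.

```python
import numpy as np

def weighted_distance(a, b, weights):
    """Weighted Euclidean distance between two frame vectors.

    weights: one non-negative weight per channel, e.g. higher values for
    finger joints when the handshape matters most (illustrative choice).
    """
    diff = np.asarray(a, dtype=float) - np.asarray(b, dtype=float)
    return np.sqrt(np.sum(np.asarray(weights, dtype=float) * diff ** 2))

# Hypothetical usage with the dtw() sketch above:
# d, path = dtw(p, q, dist=lambda a, b: weighted_distance(a, b, weights))
```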
|
{ |
|
"text": "We propose the following baseline technique for the SL utterance synthesis. The purpose of this baseline is not to solve the synthesis problem itself but to provide a reference algorithm and performance for further developed and more sophisticated techniques. We assemble the utterance from dictionary item trajectories for each sign. Then we compute trajectories of transition movement between these signs. We set the fixed length for all transitions as the average length of all transitions in our dataset. We interpolated the transition trajectory for each joint by the cubic spline. For evaluation, we compared the synthesized utterance with the utterance captured in the continuous signing by the normalized DTW with Euclidean distance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "7.1." |
|
}, |
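A minimal sketch of the baseline transition synthesis described above, assuming per-joint angular trajectories stored as NumPy arrays of shape (frames, channels); SciPy's CubicSpline interpolates between the end of one sign and the start of the next over a fixed number of transition frames. The boundary handling (a few context frames on each side) is an assumption for illustration, not the authors' exact procedure.

```python
import numpy as np
from scipy.interpolate import CubicSpline

def synthesize_transition(prev_sign, next_sign, n_frames, context=3):
    """Interpolate a transition between two signs with a cubic spline.

    prev_sign, next_sign: arrays of shape (frames, channels) with angular
    trajectories of the same skeleton; n_frames is the fixed transition
    length (the dataset average in the baseline); `context` frames on each
    side anchor the spline so velocities roughly match (assumed choice).
    """
    tail = prev_sign[-context:]
    head = next_sign[:context]
    # Time stamps: context frames, a gap of n_frames, then context frames.
    t = np.concatenate([np.arange(context),
                        np.arange(context) + context + n_frames])
    values = np.concatenate([tail, head], axis=0)
    spline = CubicSpline(t, values, axis=0)
    t_new = np.arange(context, context + n_frames)
    return spline(t_new)

# Hypothetical assembly of an utterance from two dictionary signs:
# utterance = np.concatenate([sign1, synthesize_transition(sign1, sign2, 20), sign2])
```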
|
{ |
|
"text": "We selected a pair of utterances that have more appearances in the dataset in order to provide a comparison with a reference.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7.2." |
|
}, |
|
{ |
|
"text": "\u2022 Utterance 1: \"zima-hory-kolem\" (literal translation: cold-hills-approximately). Confusion matrix is shown in Table 4 \u2022 Utterance 2: \"pocasi-zitra-bude\" (literal translation: weather-tomorrow-will be). Confusion matrix is shown in Table 5 In confusion matrices (Tables 4 and 5) , we can see the normalized DTW distances of the synthesized utterance compared to utterances with the same meaning that appear in continuous signing. For reference, we added a comparison with the utterance with other meaning. Table 5 : Confusion matrix of normalized DTW distances for utterance 2. Synthesised data (synth), compared with real data (appear1-3) and other utterance with different meaning.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 118, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 239, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 278, |
|
"text": "(Tables 4 and 5)", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 506, |
|
"end": 513, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7.2." |
|
}, |
|
{ |
|
"text": "The comparison of the normalized DTW distances shows larger differences between synthesized utterance and examples from continuous data then among the continuous data. We can also distinct different utterances from each other. The difference between synthesized data and examples from continuous data can be caused by various reasons. We try to explain some of those in the following discussion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7.2." |
|
}, |
|
{ |
|
"text": "There is a difference in the pacing and the method of signing for signs in the dictionary and the same signs in the continuous signing. On average, the dictionary signs are more than twice longer than signs from continuous signing. The average duration of signs in our dataset is 0.81/0.38 seconds in dictionary/continuous signing. There are also differences in signs that consist of repetitive moves. Usually, more repetitions are made in dictionary items than in continuous signing. Those differences are insignificant in human understanding of the sign but enlarge the measured distance. The transitions are synthesized with a constant length and such an approximation does not correspond with the observed reality. The cubic spline interpolation is also heavily dependant on the annotation's precise selection of the start and the end point and also does not respect the nature of the human movement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "8." |
|
}, |
|
{ |
|
"text": "We presented a new 3D motion capture dataset of Czech Sign Language (CSE), which we would like to share with the community. Its main purpose is to provide the data for further analysis and data-based automatic synthesis of CSE utterances. The dataset was recorded using the state-of-theart motion capture technology to provide the most precise trajectories of the motion. The size of the dataset and the precision of tracked components are comparable to the best existing datasets for other SLs. The dataset contains trajectories of body, arms, hands and face markers recorded at once in order to provide consistent data without the need for the time alignment. We introduced a baseline for the data-driven synthesis of SL utterances and suggested a method for objective data evaluation in the form of normalized DTW algorithm and Euclidean distance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "9." |
|
}, |
|
{ |
|
"text": "In future work, we will focus on improving the quality of the synthesis by using machine learning techniques and the normalized DTW distance as an objective function. We would also like to verify the correlation between objective and subjective evaluations. We also would like to further improve synthesis by adding a non-manual property as well as other more complex SL grammar concepts. This will require annotations in more than one-tier. The additional annotation can be done in semi-automatic or fully automatic mode. It will also be beneficial to use multiple annotators on the same task to eliminate human errors and improve the precision of an annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "9." |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported by the Ministry of Education of the Czech Republic, project No. LTARF18017. This paper was supported by the Ministry of Education, Youth and Sports of the Czech Republic project No. LO1506. This work was supported by the European Regional Development Fund under the project AI&Reasoning (reg. no. CZ.02.1.01/0.0/0.0/15 003/0000466).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Using dynamic time warping to find patterns in time series", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Berndt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Clifford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "KDD workshop", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "359--370", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Berndt, D. J. and Clifford, J. (1994). Using dynamic time warping to find patterns in time series. In KDD work- shop, volume 10, pages 359-370. Seattle, WA.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Realtime multi-person 2d pose estimation using part affinity fields", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Simon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Sheikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1302--1310", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cao, Z., Simon, T., Wei, S., and Sheikh, Y. (2017). Re- altime multi-person 2d pose estimation using part affin- ity fields. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1302-1310.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Enhanced ELAN functionality for sign language corpora", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Crasborn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Sloetjes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "6th International Conference on Language Resources and Evaluation (LREC 2008) 3rd Workshop on the Representation and Processing of Sign Languages: Construction and Exploitation of Sign Language Corpora", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "39--43", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Crasborn, O. and Sloetjes, H. (2008). Enhanced ELAN functionality for sign language corpora. In 6th Interna- tional Conference on Language Resources and Evalua- tion (LREC 2008) 3rd Workshop on the Representation and Processing of Sign Languages: Construction and Exploitation of Sign Language Corpora, pages 39-43.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Building French Sign Language Motion Capture Corpora for Signing Avatars", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Gibet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Workshop on the Representation and Processing of Sign Languages: Involving the Language Community, LREC 2018", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gibet, S. (2018). Building French Sign Language Mo- tion Capture Corpora for Signing Avatars. In Work- shop on the Representation and Processing of Sign Lan- guages: Involving the Language Community, LREC 2018, Miyazaki, Japan, May.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Evaluation of american sign language generation by native asl signers", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Huenerfauth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Allbeck", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "ACM Transactions on Accessible Computing (TACCESS)", |
|
"volume": "1", |
|
"issue": "1", |
|
"pages": "1--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huenerfauth, M., Zhao, L., Gu, E., and Allbeck, J. (2008). Evaluation of american sign language generation by na- tive asl signers. ACM Transactions on Accessible Com- puting (TACCESS), 1(1):1-27.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Semiautomatic data glove calibration for sign language corpora building", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Kr\u0148oul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "\u017delezn\u1ef3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "7th Workshop on the Representation and Processing of Sign Languages: Corpus Mining, LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kr\u0148oul, Z., Kanis, J.,\u017delezn\u1ef3, M., and M\u00fcller, L. (2016). Semiautomatic data glove calibration for sign language corpora building. In 7th Workshop on the Representa- tion and Processing of Sign Languages: Corpus Mining, LREC.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Toward sign language motion capture dataset building", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Kr\u0148oul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Jedli\u010dka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "And\u017eelezn\u00fd", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Speech and Computer", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kr\u0148oul, Z., Jedli\u010dka, P., Kanis, J., and\u017delezn\u00fd, M. (2016). Toward sign language motion capture dataset building. In Speech and Computer, pages 706-713, Cham, 08. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Collecting a motioncapture corpus of american sign language for data-driven generation research", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Huenerfauth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the NAACL HLT 2010 Workshop on Speech and Language Processing for Assistive Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "89--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lu, P. and Huenerfauth, M. (2010). Collecting a motion- capture corpus of american sign language for data-driven generation research. In Proceedings of the NAACL HLT 2010 Workshop on Speech and Language Processing for Assistive Technologies, pages 89-97. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A new tool to facilitate prosodic analysis of motion capture data and a data-driven technique for the improvement of avatar motion", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Wolfe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Wilbur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Moncrief", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Malaia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Fujimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Baowidan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Stec", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "McDonald, J., Wolfe, R., Wilbur, R. B., Moncrief, R., Malaia, E., Fujimoto, S., Baowidan, S., and Stec, J. (2016). A new tool to facilitate prosodic analysis of mo- tion capture data and a data-driven technique for the im- provement of avatar motion.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Coarticulation analysis for sign language synthesis", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Naert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Larboulette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Gibet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Universal Access in Human-Computer Interaction. Designing Novel Interactions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naert, L., Larboulette, C., and Gibet, S. (2017). Coar- ticulation analysis for sign language synthesis. In Margherita Antona et al., editors, Universal Access in Human-Computer Interaction. Designing Novel Interac- tions, pages 55-75, Cham. Springer International Pub- lishing.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Effective and efficient similarity searching in motion capture data", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Sedmidubsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Elias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Zezula", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Multimedia Tools Appl", |
|
"volume": "77", |
|
"issue": "10", |
|
"pages": "12073--12094", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sedmidubsky, J., Elias, P., and Zezula, P. (2018). Effective and efficient similarity searching in motion capture data. Multimedia Tools Appl., 77(10):12073-12094, May.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Visualization of MoCap camera layout. View from back and above, the signer is in the middle.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Signer marker setup.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Annotation in ELAN.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Marker setup (data visualization).", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Model visualization.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Maker sizes and count per segment.", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"6\">: Confusion matrix of normalized DTW distances</td></tr><tr><td colspan=\"6\">for utterance 1. Synthesised data (synth), compared with</td></tr><tr><td colspan=\"6\">real data (appear1-3) and other utterance with different</td></tr><tr><td>meaning.</td><td/><td/><td/><td/><td/></tr><tr><td/><td colspan=\"5\">synth appear1 appear2 appear3 other</td></tr><tr><td>synth</td><td>0</td><td>1.51</td><td>1.43</td><td>1.61</td><td>5.28</td></tr><tr><td colspan=\"2\">appear1 1.51</td><td>0</td><td>0.62</td><td>0.71</td><td>4.69</td></tr><tr><td colspan=\"2\">appear2 1.43</td><td>0.62</td><td>0</td><td>0.82</td><td>4.84</td></tr><tr><td colspan=\"2\">appear3 1.61</td><td>0.71</td><td>0.82</td><td>0</td><td>4.60</td></tr><tr><td>other</td><td>5.28</td><td>4.69</td><td>4.84</td><td>4.60</td><td>0</td></tr></table>", |
|
"text": "", |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |