|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:28:18.997365Z" |
|
}, |
|
"title": "K-RSL: a Corpus for Linguistic Understanding, Visual Evaluation, and Recognition of Sign Languages", |
|
"authors": [ |
|
{ |
|
"first": "Alfarabi", |
|
"middle": [], |
|
"last": "Imashev", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Medet", |
|
"middle": [], |
|
"last": "Mukushev", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Vadim", |
|
"middle": [], |
|
"last": "Kimmelman", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Bergen", |
|
"location": { |
|
"country": "Norway" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Anara", |
|
"middle": [], |
|
"last": "Sandygulova", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The paper presents the first dataset that aims to serve interdisciplinary purposes for the utility of computer vision community and sign language linguistics. To date, a majority of Sign Language Recognition (SLR) approaches focus on recognising sign language as a manual gesture recognition problem. However, signers use other articulators: facial expressions, head and body position and movement to convey linguistic information. Given the important role of non-manual markers, this paper proposes a dataset and presents a use case to stress the importance of including non-manual features to improve the recognition accuracy of signs. To the best of our knowledge no prior publicly available dataset exists that explicitly focuses on non-manual components responsible for the grammar of sign languages. To this end, the proposed dataset contains 28250 videos of signs of high resolution and quality, with annotation of manual and nonmanual components. We conducted a series of evaluations in order to investigate whether non-manual components would improve signs' recognition accuracy. We release the dataset to encourage SLR researchers and help advance current progress in this area toward realtime sign language interpretation. Our dataset will be made publicly available at https:// krslproject.github.io/krsl-corpus", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The paper presents the first dataset that aims to serve interdisciplinary purposes for the utility of computer vision community and sign language linguistics. To date, a majority of Sign Language Recognition (SLR) approaches focus on recognising sign language as a manual gesture recognition problem. However, signers use other articulators: facial expressions, head and body position and movement to convey linguistic information. Given the important role of non-manual markers, this paper proposes a dataset and presents a use case to stress the importance of including non-manual features to improve the recognition accuracy of signs. To the best of our knowledge no prior publicly available dataset exists that explicitly focuses on non-manual components responsible for the grammar of sign languages. To this end, the proposed dataset contains 28250 videos of signs of high resolution and quality, with annotation of manual and nonmanual components. We conducted a series of evaluations in order to investigate whether non-manual components would improve signs' recognition accuracy. We release the dataset to encourage SLR researchers and help advance current progress in this area toward realtime sign language interpretation. Our dataset will be made publicly available at https:// krslproject.github.io/krsl-corpus", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "There exist over 300 sign languages around the world that are native to 70 million deaf people (Bragg et al., 2019) . Sign languages are comprised of hand gestures, arms and body movements, head position, facial expressions, and lip patterns (Sandler and Lillo-Martin, 2006) . While automatic speech recognition has progressed to being commercially available, automatic Sign Language Recognition (SLR) is still in its infancy (Cooper et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 115, |
|
"text": "(Bragg et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 242, |
|
"end": 274, |
|
"text": "(Sandler and Lillo-Martin, 2006)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 447, |
|
"text": "(Cooper et al., 2011)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To date, more than half of published visionbased research utilizes isolated sign language data with a vocabulary size of less than 50 signs (Koller, 2020) . But the real-world utility of SLR solutions requires continuous recognition, which is significantly more challenging than recognising individual signs due to co-articulation (the ending of one sign affecting the start of the next), depiction (visually representing or enacting content), epenthesis effects (insertion of extra features into signs), generalization, and so on (Bragg et al., 2019) . As a result, realistic, generalisable, and large datasets are necessary to advance SLR.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 154, |
|
"text": "(Koller, 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 551, |
|
"text": "(Bragg et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Current efforts in SLR do not address the complexities of sign language linguistics, and thus have a limited real-world value (Bragg et al., 2019) . Chatzis et al. (2020) highlight the importance of non-manual components of sign languages. For example, they can change meaning of a verb, or differentiate between objects and people. According to Koller (2020) , there is an overall lack of nonmanual parameters that are included in medium and larger vocabulary recognition systems. For example, many computer vision approaches focus on the signers' hands only and tend to ignore the rich channel of information conveyed by non-manual articulators: facial expressions, mouthing, movement and position of the head and body conveying important grammatical and lexical information. In addition, many datasets allowed novice or nonnative contributions (i.e. students) in addition to slower signing and simplifying the style and the vocabulary to make the computer vision problem easier but of no real value (Bragg et al., 2019) . For the progress in SLR, interdisciplinary efforts are required with an involvement of native signers and sign language linguists.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 146, |
|
"text": "(Bragg et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 170, |
|
"text": "Chatzis et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 359, |
|
"text": "Koller (2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1002, |
|
"end": 1022, |
|
"text": "(Bragg et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Beyond targeting the local need of creating the first corpus within CIS (Commonwealth of Independent States) region suitable for machine learn-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Signers Vocabulary Videos Purdue RVL-SLLL ASL (2002 ( ) (Mart\u00ednez et al., 2002 14 104 2,576 GSL Lemmas (2007) (Efthimiou and Fotinea, 2007) 2 1046 2,100 RWTH- BOSTON (2008 ) (Athitsos et al., 2008 5 483 7,768 SIGNUM (2008) (Von Agris et al., 2008) 25 780 3,703 Finish S-pot (2014) (Viitaniemi et al., 2014) 5 1211 4,328 RWTH-PHOENIX-Weather 2014 T (Cihan Camgoz et al., 2018) 9 1231 45,760 Video-Based CSL (2018) (Huang et al., 2018) 50 178 25,000 KETI (2019) (Ko et al., 2019) 12 419 11,578 GSL SI (2019) (Chatzis et al., 2020) 7 310 10,290 K-RSL 10 600 28,250 ing, the motivation behind the proposed dataset is in the need to stress the importance of non-manual components present in many signs. The proposed dataset contains continuous sign language data with a focus on specifically selected cases where nonmanual markers play a vital role in differentiating between similar signs or sentences. This approach of corpus creation allows researchers from different fields to conduct experiments utilising this dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 51, |
|
"text": "(2002", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 52, |
|
"end": 78, |
|
"text": "( ) (Mart\u00ednez et al., 2002", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 109, |
|
"text": "Lemmas (2007)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 110, |
|
"end": 139, |
|
"text": "(Efthimiou and Fotinea, 2007)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 159, |
|
"end": 171, |
|
"text": "BOSTON (2008", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 196, |
|
"text": ") (Athitsos et al., 2008", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 222, |
|
"text": "(2008)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 247, |
|
"text": "(Von Agris et al., 2008)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 306, |
|
"text": "(Viitaniemi et al., 2014)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 375, |
|
"text": "(Cihan Camgoz et al., 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 413, |
|
"end": 433, |
|
"text": "(Huang et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 460, |
|
"end": 477, |
|
"text": "(Ko et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 506, |
|
"end": 528, |
|
"text": "(Chatzis et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To date, SL linguists and ML researchers were rarely able to utilize the same datasets due to limitations of both kinds. Thus, we make the following contributions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 we release the first Kazakh-Russian Sign Language (KRSL) corpus consisting of 10 signers, 28250 continuous sentences, and vocabulary size 600 signs appropriate for ML research;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 we release raw videos appropriate for linguists and general population;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 we release isolated signs, extracted frames and features for easy and fast experiments aiming at compatibility with the formats of other SL datasets;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 we evaluate pose estimation and action recognition approaches to setup baselines on the K-RSL dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Section 2 presents the background on sign languages and non-manual components followed by a brief description of other SL datasets. Section 3 outlines the proposed dataset. Section 4 details a series of baseline evaluations conducted in order to investigate whether non-manual components would improve recognition accuracy. Section 5 details our use case evaluation. Section 6 concludes the paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This section discusses related work on sign language datasets, state of the art in SLR, and the importance of non-manual features for sign languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Sign language datasets consist of videos of either isolated or continuous signing. Table 1 presents a comparison of the continuous sign language datasets commonly utilized for sign language recognition with an inclusion of the proposed K-RSL ordered by date. Bragg et al. (2019) specify that the size of the datasets, continuous signing, involvement of native signers, and signers' variety are the main concerns related to current datasets. These challenges put a limitation on the accuracy and robustness of the models developed for SLR to be deployed in the real-world applications.", |
|
"cite_spans": [ |
|
{ |
|
"start": 259, |
|
"end": 278, |
|
"text": "Bragg et al. (2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 90, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sign Language Datasets", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Latest works in the area of SLR are focused on vision-based continuous sign language recognition. All the evaluations are performed on the RWTH-PHOENIX-Weather 2014 dataset (Cihan Camgoz et al., 2018) . There are various approaches offering recognition frameworks utilizing deep neural networks, reinforcement learning or recurrent neural networks. For example, proposed an approach that apply encoder-decoder structure to the reinforcement learning. Their method achieved competitive results when compared with other methods and has a Word Error Rate (WER) of 38.3%. Temporal segmentation creates additional challenges for continuous SLR. To address this issue, Huang et al. (2018) Space (LS-HAN). This proposed framework eliminated the preprocessing of temporal segmentation and achieved the accuracy of 0.617. proposed I3D-TEM-CTC framework with iterative optimization for continuous sign language recognition. By increasing the quality of pseudo labels, the final performance of the system was improved and achieved a WER of 34.5%. However, the most promising results were achieved by combining different modalities. Cui et al. (2019) proposed recurrent convolutional neural network on the multi-modal fusion data of RGB images along with the optical flow data and achieved WER of 22.86%. presented approaches where they focused on the sequential parallelism to learn a sign language, mouth shape and handshape classifier. They have improved the WER to 26.0%. This clearly shows that combination of manual and non-manual features such as mouth shape could significantly improve performance of the recognition systems.", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 200, |
|
"text": "(Cihan Camgoz et al., 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 663, |
|
"end": 682, |
|
"text": "Huang et al. (2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1121, |
|
"end": 1138, |
|
"text": "Cui et al. (2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sign Language Recognition", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Sign languages are natural languages existing in the visual modality (Sandler and Lillo-Martin, 2006) . Signs in sign languages are produced not only by using the manual articulators (the hands), but also by non-manual articulators (the body, head, facial features). The importance of the non-manual features is evidenced e.g. by the fact that signers focus their attention not on the hands of the interlocutor, but on the face (Pfau and Quer, 2010) . It has been shown that non-manual markers function at different levels in sign languages (Pfau and Quer, 2010) . On the lexical level, signs which are manually identical can be distinguished by facial expression or specifically by mouthing (silent articulation of a word from a spoken language) (Crasborn et al., 2008) . Signs referring to emotions are obligatorily accompanied by lexicalized facial expressions related to the corresponding emotion. Non-manual markers are especially important on the level of sentence and beyond. Specifically, negation in many sign languages is expressed by head movements (Zeshan, 2004a) , and questions are distinguished from statements by eyebrow and head position almost universally (Zeshan, 2004b) . Of course, signers also use the face to express their emotions, so emotional and linguistic non-manual markers can interact in complex ways (De Vos et al., 2009) . Antonakos et al. (2015) presented an overview of non-manual parameter employment for SLR and conclude that a limited number of works focused on employing non-manual features in SLR. There have been works that focused on combining both manual and non-manual features (Freitas et al., 2017; Liu et al., 2014; Yang and Lee, 2013; Mukushev et al., 2020) or non-manual features only (Kumar et al., 2017) . While the importance of nonmanual markers has been thoroughly demonstrated in linguistic research, their role in sign language recognition has not been investigated in detail yet.", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 101, |
|
"text": "(Sandler and Lillo-Martin, 2006)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 428, |
|
"end": 449, |
|
"text": "(Pfau and Quer, 2010)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 541, |
|
"end": 562, |
|
"text": "(Pfau and Quer, 2010)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 747, |
|
"end": 770, |
|
"text": "(Crasborn et al., 2008)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1060, |
|
"end": 1075, |
|
"text": "(Zeshan, 2004a)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 1174, |
|
"end": 1189, |
|
"text": "(Zeshan, 2004b)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1332, |
|
"end": 1353, |
|
"text": "(De Vos et al., 2009)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1356, |
|
"end": 1379, |
|
"text": "Antonakos et al. (2015)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1622, |
|
"end": 1644, |
|
"text": "(Freitas et al., 2017;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1645, |
|
"end": 1662, |
|
"text": "Liu et al., 2014;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1663, |
|
"end": 1682, |
|
"text": "Yang and Lee, 2013;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1683, |
|
"end": 1705, |
|
"text": "Mukushev et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1734, |
|
"end": 1754, |
|
"text": "(Kumar et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance of Non-manual Features", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Given the important role of non-manual markers, in this paper we present a corpus which is motivated by the importance of both manual and non-manual features. We focus on specific cases where nonmanual markers play a vital role in differentiating between similar signs or similar sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed K-RSL Corpus", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "KRSL is the sign language used in the Republic of Kazakhstan. KRSL is closely related to Russian Sign Language (RSL) as centralized language policy of Soviet Union led to the spread of RSL in the Soviet republics. According to Kimmelman et al. (2020) both KRSL and RSL show a substantial lexical overlap, and are completely mutually intelligible. At the same time, it cannot be concluded that the same applies to the grammar of the two languages. Figure 1 : Examples of each sign from our dataset: A) \"which one\" statement, B) \"which one\" question, C) \"which\" statement, D) \"which\" question, E) \"how\" statement, F) \"how\" question, G) \"what\" statement, H) \"what\" question, I) \"who\" statement, J) \"who\" question, K) \"when\" statement, L) \"when\" question, M) \"where(location)\" statement, N) \"where(location)\" question, O) \"where(direction)\" statement, P) \"where(direction)\" question, Q) \"where(direction)\" statement, R) \"where(direction)\" question, S) \"how much\" statement, T) \"how much\" question. Figure 2 : Emotions: A) \"happy\", B) \"sad\", C) \"anger\", D) \"scared\", E) \"pity\", F) \"surprised\".", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 447, |
|
"end": 455, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 994, |
|
"end": 1002, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Kazakh-Russian Sign Language (KRSL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "K-RSL dataset consists of videos of phrases, recorded by five professional sign language interpreters and one subset was additionally recorded by five deaf participants who are also native signers. Dataset can be divided into four subsets from the linguistic point of view: question-statement pairs, signs of emotion, emotional question-statement pairs, and phonologically similar signs (minimal pairs). They have been asked to sign 200 phrases for the first subset, 60 phrases for the second subset, 30 phrase with 3 emotional characteristics for the third subset, and 125 phrases for the fourth subset accordingly. Each phrase was repeated at least ten times in a row by each signer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Data", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The five hearing participants are hearing native signers of KRSL, as they grew up with parents using KRSL at home. Four of them are employed as news interpreters at the national television. The setup had a green background and a LOGITECH C920 HD PRO WEBCAM. The shooting was performed in an office space without professional lighting sources. The summary of the K-RSL dataset is presented in Table 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 392, |
|
"end": 399, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Data", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Similar to question words in many spoken languages, question signs in KRSL can be used not only in questions (Who came?) but also in statements (I know who came). Thus, each question sign can occur either with non-manual question marking (eyebrow raise, sideward or backward head tilt), or without it. In addition, question signs are usually accompanied by mouthing of the corresponding Russian/Kazakh word (e.g. kto/kim for 'who', and chto/ne for 'what'). While question signs are also distinguished from each other by manual features, mouthing provides extra information, which can be used in recognition. Thus, the two types of non-manual markers (eyebrow and head position vs. mouthing) can play a different role in recognition: the former can be used to distinguish statements from questions, and the latter can be used to help distinguish different question signs from each other. To this end, we selected ten words and composed twenty phrases with each word (ten statements and ten questions): 'what', 'who', 'which', 'which one', 'when', 'where (direction)', 'where (location)', 'why', 'how', and 'how much'. We distinguish them to twenty classes (as ten words have a pair in both statement and question form). : Examples of three phonological minimal pairs: A) \"tea\", B) \"Thursday\", C) \"orange\", D) \"October\", E) \"Moscow\" F) \"old\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question vs Statement", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "In KRSL, as in other sign languages, the signs for emotions, such as ANGRY, SAD, SURPRISED, SCARED, PITY, HAPPY are accompanied with facial expressions corresponding to the emotion named by the sign. Therefore, we collected phrases containing the six signs for basic emotions. We hypothesized that, since facial expressions in this signs are lexically associated with them, inclusion of non-manual components can improve recognition of these signs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotion signs", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "De Vos et al. (2009) analyzed interaction of emotional facial expressions and grammatical nonmanual markers in Sign Language of the Netherlands (NGT). They elicited polar and content questions in NGT, as well as sentences with topic marking signed neutrally, with anger, or with surprise. Polar questions and topics are normally accompanied with raised eyebrows, while content questions with furrowed eyebrows; the emotion of anger causes eyebrow furrowing, and the emotion of surprise causes eyebrow raise. Therefore, in some of the contexts emotions and grammar were in agreement (e.g. surprised polar questions), while in others in competition (e.g. angry polar questions). The researchers found that emotional and grammatical non-manuals interact in complex ways.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotional questions vs. emotional statements", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "We created a similar dataset for KRSL. The signers were asked to sign ten sentences as either a statement (no eyebrow movement expected), a polar questions (eyebrow raise expected) or wh-questions (adding single question sing), and with three different emotions: neutral, surprise (eyebrow raise expected), and anger (eyebrow furrowing expected). We hypothesized that emotions and grammatical markers would interact in complex ways, and that these interactions might negatively influence recognition accuracy when recognizing sentence types (questions vs statements).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotional questions vs. emotional statements", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "Similar to words in spoken languages, signs can form minimal pairs: one can find signs that are minimally different in their manual component (Sandler and Lillo-Martin, 2006) . For instance, the KRSL signs \"Moscow\", \"old\", and \"grandmother\" all have the same handshape (the fist) and location (the cheek), but different movements. It is possible to find signs which are distinguished by handshape only or by location only as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 174, |
|
"text": "(Sandler and Lillo-Martin, 2006)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Minimal pairs", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "We hypothesized that minimal pairs of signs are potentially difficult for recognition, as they are quite similar in shape. However, these signs are additionally distinguished by mouthing (see above). Therefore, including non-manual components can improve sign recognition for such pairs of signs. We thus created a dataset with 15 minimal pairs of signs signed as parts of phrases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Minimal pairs", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "We utilized OpenPose library (Cao et al., 2017; Wei et al., 2016) in order to extract the keypoints of the person in the videos. OpenPose is the realtime multi-person keypoint detection library for body, face, hands, and foot estimation provided by Carnegie Mellon University . It detects 2D information of 25 keypoints (joints) on the body and feet, 2x21 keypoints on both hands and 70 keypoints on the face. It also provides a 3D single-person keypoint detection in real time on multi-camera videos. OpenPose provides the values for each keyframe as an output in JSON format. Since the dataset we use consists of RGB videos, we only consider 2D keypoints in this work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 47, |
|
"text": "(Cao et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 48, |
|
"end": 65, |
|
"text": "Wei et al., 2016)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Openpose Feature Extraction", |
|
"sec_num": "3.3" |
|
}, |
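A minimal sketch of how the per-frame OpenPose output described above can be turned into the 2D keypoint features used later. It assumes OpenPose's default JSON layout (one file per frame, flat [x, y, confidence] triplets per keypoint, with hand and face detection enabled) and a single signer per video; the function name and preprocessing choices are illustrative, not the exact K-RSL pipeline.

```python
# Sketch: reading 2D keypoints from OpenPose per-frame JSON output.
import json
import numpy as np

def load_frame_keypoints(json_path):
    """Return (x, y) coordinates of body, hand and face keypoints for one frame."""
    with open(json_path) as f:
        data = json.load(f)
    if not data["people"]:
        return None  # no person detected in this frame
    person = data["people"][0]  # K-RSL videos contain a single signer
    parts = [
        person["pose_keypoints_2d"],        # 25 body/foot keypoints
        person["hand_left_keypoints_2d"],   # 21 left-hand keypoints
        person["hand_right_keypoints_2d"],  # 21 right-hand keypoints
        person["face_keypoints_2d"],        # 70 face keypoints
    ]
    coords = []
    for flat in parts:
        kp = np.asarray(flat, dtype=np.float32).reshape(-1, 3)  # (x, y, confidence)
        coords.append(kp[:, :2])             # drop the confidence column
    return np.concatenate(coords)            # shape (137, 2) -> 274 values per frame
```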
|
{ |
|
"text": "Signing recognition can be considered as a variation of action recognition or human pose estimation tasks. Keypoint detection library OpenPose (Cao et al., 2017; Wei et al., 2016) enables us to evaluate both manual (hand keypoints) and nonmanual features (face and pose keypoints). One of the latest works in action recognition (Tran et al., 2018) introduces a new spatiotemporal convolutional block R(2+1)D that achieves state-of-the-art results. In order to analyze and classify collected dataset we employ both approaches as a baseline models for isolated sign recognition. We have extracted isolated clips from the statement-question subset of following signs: 'what', 'who', 'which', 'which one', 'when', 'where (direction)', 'where (location)', 'why', 'how', and 'how much'. We distinguish them to twenty classes (as ten words have a pair in both statement and question form).", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 161, |
|
"text": "(Cao et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 179, |
|
"text": "Wei et al., 2016)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 328, |
|
"end": 347, |
|
"text": "(Tran et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline methods", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our subsets mainly imply classification problems and have sequential features. Generally, we extract features in each frame of videos using OpenPose (Cao et al., 2017; Wei et al., 2016) library and then feed it to the classification algorithm. Therefore, we exploit classical machine learning techniques, namely Logistic regression by concatenating sequences of keypoints into one sample. The sequence of keyframes holds the frames of each sign video. Since we aim to compare performances of non-manual features, we prepared two conditions: manual only and manual and non-manual fea-tures combined. Consequentially, in the first case, one datapoint consists of concatenated keypoints of each video and has a maximum of 30 frames * 84 keypoints = 2520 manual only features, while in the second case, one datapoint consists of 30 frames * 274 keypoints = 8220 manual and nonmanual features for each of the twenty classes. We used the scikit-learn library for Python as the keypoints classification method for the experiments presented in this paper.", |
|
"cite_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 167, |
|
"text": "(Cao et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 185, |
|
"text": "Wei et al., 2016)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pose estimation baseline", |
|
"sec_num": "4.1" |
|
}, |
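The following sketch illustrates the pose estimation baseline described above: keypoints from up to 30 frames are concatenated into a single feature vector (2,520 values for hands only, 8,220 values with body and face added) and classified with scikit-learn's logistic regression. Array shapes, variable names, and the commented hyperparameters are assumptions for illustration; the settings actually used are reported in Section 4.3.

```python
# Sketch of the keypoint-based baseline: concatenate per-frame keypoints into
# one feature vector and classify it with logistic regression.
import numpy as np
from sklearn.linear_model import LogisticRegression

N_FRAMES = 30  # fixed sequence length per video

def to_feature_vector(hand_kp, face_body_kp=None):
    """hand_kp: (30, 42, 2) hand keypoints; face_body_kp: (30, 95, 2) body+face keypoints."""
    parts = [hand_kp.reshape(N_FRAMES, -1)]                # 84 manual values per frame
    if face_body_kp is not None:
        parts.append(face_body_kp.reshape(N_FRAMES, -1))   # +190 non-manual values per frame
    return np.concatenate(parts, axis=1).reshape(-1)       # 2520 or 8220 features per video

# X_train, X_test: stacked feature vectors; y_train, y_test: 20 class labels
# clf = LogisticRegression(solver="lbfgs", penalty="l2", max_iter=1000)
# clf.fit(X_train, y_train); print(clf.score(X_test, y_test))
```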
|
{ |
|
"text": "Latest works in action recognition either employ Two-Stream Inflated 3D ConvNet (I3D) (Carreira and Zisserman, 2017) or spatiotemporal convolutional block R(2+1)D (Tran et al., 2018) . Both architectures are usually trained on ImageNet (Russakovsky et al., 2015) and fine-tuned on Kinetics dataset (Kay et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 116, |
|
"text": "(Carreira and Zisserman, 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 182, |
|
"text": "(Tran et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 316, |
|
"text": "(Kay et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Action recognition baseline", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In this paper, we employ R(2+1)D (Ghadiyaram et al., 2019) model which is highly accurate and significantly faster than other approaches. It is additionally pre-trained on over 65 million videos. Also, it uses as input only video frames, which makes it faster comparing to other approached that require optical flow fields as additional input. In order to recognize signs from our dataset we finetuned R(2+1)D on the statement-questions subset. Since we have a different number of classes in our subset, only the last fully connected of the model is re-trained.", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 58, |
|
"text": "(Ghadiyaram et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Action recognition baseline", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The action recognition baseline is implemented in PyTorch (Paszke et al., 2019) and uses a R(2+1)D pre-trained model (Ghadiyaram et al., 2019) . Model input size (number of consecutive frames) is set to 8 and batch size is 16. We train the model for 20 epochs with a starting learning rate of 0.0001. All frames are scaled to a resolution of 112 112 and keeping original ratio. Also, during the training process frames are randomly cropped with scale between 0.6 and 1. The pose estimation baseline is implemented using scikit-learn library (Pedregosa et al., 2011) and takes as an input sequence of keypoints extracted using the OpenPose library (Cao et al., 2017; Wei et al., 2016) . We train Logistic Regression classifier using the 'lbfgs' solver and L2 penalty.", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 79, |
|
"text": "(Paszke et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 142, |
|
"text": "(Ghadiyaram et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 541, |
|
"end": 565, |
|
"text": "(Pedregosa et al., 2011)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 647, |
|
"end": 665, |
|
"text": "(Cao et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 666, |
|
"end": 683, |
|
"text": "Wei et al., 2016)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation details", |
|
"sec_num": "4.3" |
|
}, |
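A hedged sketch of the action recognition setup described in this subsection. The paper fine-tunes an R(2+1)D model pre-trained on 65M videos (Ghadiyaram et al., 2019) and retrains only its last fully connected layer; since that exact checkpoint is not bundled here, the sketch uses torchvision's Kinetics-pretrained r2plus1d_18 as a stand-in, and the optimizer choice (Adam) is an assumption, as the paper only specifies the learning rate, batch size, clip length, and number of epochs.

```python
# Sketch: fine-tune only the classification head of an R(2+1)D video model.
import torch
import torch.nn as nn
from torchvision.models.video import r2plus1d_18

NUM_CLASSES = 20                      # 10 question signs x {statement, question}

model = r2plus1d_18(pretrained=True)  # Kinetics-pretrained stand-in backbone
for p in model.parameters():          # freeze the backbone ...
    p.requires_grad = False
model.fc = nn.Linear(model.fc.in_features, NUM_CLASSES)  # ... retrain the last FC layer only

optimizer = torch.optim.Adam(model.fc.parameters(), lr=1e-4)  # lr from Section 4.3
criterion = nn.CrossEntropyLoss()
# training loop (not shown): clips of shape (batch=16, 3, 8, 112, 112), labels in [0, 19]
```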
|
{ |
|
"text": "As stated in Table 2 , each subset has 5 signers, which were assigned an approximately equal number of videos. The only exception is the Emotional Question-Statement subset which has 10 signers. We assign all videos performed by 4 signers in the train set and videos with the remaining signer into the test set. In addition, we choose the remaining signer for each class randomly, to diversify train and test data. Validation set is randomly chosen from the train set and has 20% length of the train set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 20, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Suggested Train-Test Splits", |
|
"sec_num": "4.4" |
|
}, |
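A small sketch of the suggested signer-independent split: for each class, one randomly chosen signer is held out for testing, the remaining signers' videos form the training set, and 20% of the training videos are set aside for validation. The (video_path, label, signer_id) sample layout is an assumption for illustration.

```python
# Sketch: per-class signer-independent train/val/test split.
import random

def per_class_signer_split(samples, signers, val_fraction=0.2, seed=None):
    """samples: list of (video_path, label, signer_id) tuples; signers: list of signer ids."""
    rng = random.Random(seed)
    labels = {s[1] for s in samples}
    held_out = {label: rng.choice(signers) for label in labels}   # one test signer per class
    test = [s for s in samples if s[2] == held_out[s[1]]]
    train = [s for s in samples if s[2] != held_out[s[1]]]
    rng.shuffle(train)
    n_val = int(len(train) * val_fraction)                        # 20% of train for validation
    return train[n_val:], train[:n_val], test                     # train, val, test
```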
|
{ |
|
"text": "The main problem of developing sign language recognition algorithm is that data is usually not big and/or diverse enough for generalization. Thus, we suggest a simple method to augment image sequences of fixed length from videos with a variable amount of frames. The only constraint is that a video has to be longer than a chosen fixed length.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data augmentation", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Given a sign video V = (f 1 , f 2 , ..., f m ) that contains m frames, which satisfies condition m \u2265 n, where n is the chosen fixed sequence length, we pick equally distanced frames from videos with a random initial frame. By distance between the frames, we mean the difference between their indexes, let's call it s.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data augmentation", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "s = m n", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data augmentation", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "The initial frame is picked among all possible candidates which are first s frames with k leftover frames after them. Here, k = m mod n. Therefore, the augmented fixed sized sequence is S = (f i , f i+s , f i+2s , ..., f i+ns ), where i is a random integer from 1 to s + k.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data augmentation", |
|
"sec_num": "4.5" |
|
}, |
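The sampling scheme defined above, written out as a short sketch: with step s = floor(m/n) and k = m mod n leftover frames, the start index is drawn uniformly from the first s + k positions, which guarantees that the last sampled index stays within the video. Zero-based indices are used here, whereas the text above counts from 1.

```python
# Sketch of the fixed-length frame sampling augmentation (Section 4.5).
import random

def sample_fixed_length(frames, n, rng=random):
    """frames: list of m frames (m >= n); returns n equally spaced frames."""
    m = len(frames)
    assert m >= n, "video must contain at least n frames"
    s = m // n                      # step between sampled frames
    k = m % n                       # leftover frames
    i = rng.randrange(s + k)        # random 0-based start index in [0, s + k - 1]
    return [frames[i + j * s] for j in range(n)]
```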
|
{ |
|
"text": "A series of experiments was conducted in order to investigate whether non-manual features would improve recognition accuracy. All experiments were performed on isolated signs extracted from the Question-Statement subset and divided into 20 classes (10 signs as statement and questions). The first experiment was the classification of 20 classes. For this reason we trained two baseline models: a logistic regression model using only manual features and with non-manual features as an input, and a R(2+1)D model on full frames as an input. Evaluation of each model was repeated 10 times with random train/test splits to avoid extreme cases. Table 3 presents the mean scores and standard deviations for the first experiment. The second experiment used the same dataset with 20 classes to compare and contrast the accuracy in terms of its improvement with different combinations of non-manual components. Table 3 : Mean scores of accuracy for the questionstatement subset after 10 iterations with random train/test splits", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 640, |
|
"end": 648, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 903, |
|
"end": 910, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5" |
|
}, |
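A brief sketch of the evaluation protocol: each configuration is trained and tested 10 times with a different random train/test split, and the mean and standard deviation of test accuracy are reported (as in Table 3). The train_and_evaluate helper is hypothetical; it stands for fitting one of the baseline models on a single split and returning its test accuracy.

```python
# Sketch: repeated evaluation over random splits, reporting mean and std accuracy.
import numpy as np

def repeated_evaluation(train_and_evaluate, n_runs=10):
    scores = [train_and_evaluate(seed=run) for run in range(n_runs)]
    return float(np.mean(scores)), float(np.std(scores))

# mean_acc, std_acc = repeated_evaluation(train_and_evaluate)
```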
|
{ |
|
"text": "Our first experiment used the Question-Statement subset divided into 20 classes (10 signs used in statements and questions). We have extracted manual and non-manual features for the isolated signs of the Question-Statement subset. The highest accuracy was achieved by the R(2+1)D model and was 86%, which is 9% higher comparing to the Logistic regression model. For the Logistic regression model trained on sequence of keypoints testing mean accuracy scores are 73.4% and 77% on manual-only and both manual and non-manual features respectively. As expected, non-manual features improved the results by 3.6% on average (from 73.4% accuracy to 77% accuracy). At the same time, improvement was not very high. The reason for that could be that the number of nonmanual features is bigger than the number of manual features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question vs. Statement", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In this experiment different combinations of nonmanual markers (eyebrow and head position vs. mouthing) were compared and their role in recognition was analyzed. The lowest testing accuracy was 73.25% for the combination of manual features and eyebrows keypoints. Eyebrows without any other non-manual feature did not provide valuable information for recognition. Only when they were used in combination with other features, the accuracy was im-proved. The highest testing accuracy was 78.2% for the combination of manual features and faceline, eyebrows, and mouth keypoints. When only mouth keypoints were used in combination with the manual features, the accuracy also increased by 0.5% compared to the baseline of 77%. Thus, we see that mouthing provides extra information, which can be used in recognition, because signers usually articulate words while performing corresponding signs. Eyebrows and head position provide additional grammatical markers to differentiate statements from questions. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A case of combining different modalities", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "This paper presents the K-RSL dataset motivated by the need to create SL datasets for interdisciplinary purposes e.g. for computer vision and computational linguistics research. Due to the challenging nature of SLR, the proposed dataset aims to attract the attention of the computer vision community with the K-RSL dataset being linguistically rich. The data was carefully selected to find various cases when manual gestures will not provide good performance and will stress the need to include nonmanual components into consideration. In addition to computer vision community, this dataset can be utilized by the linguistics community to explore research questions and computationally prove their hypotheses. Future work will include expanding the vocabulary of the corpus in addition to diversifying and increasing the number of signers recorded in noisy environmental conditions (e.g. outside of the office environment).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported by the Nazarbayev University Faculty Development Competitive Research Grant Program 2019-2021 \"Kazakh Sign Language Automatic Recognition System (K-SLARS)\". Award number is 110119FD4545.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgment", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A survey on mouth modeling and analysis for sign language recognition", |
|
"authors": [ |
|
{ |
|
"first": "Epameinondas", |
|
"middle": [], |
|
"last": "Antonakos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anastasios", |
|
"middle": [], |
|
"last": "Roussos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefanos", |
|
"middle": [], |
|
"last": "Zafeiriou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Epameinondas Antonakos, Anastasios Roussos, and Stefanos Zafeiriou. 2015. A survey on mouth model- ing and analysis for sign language recognition. 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), 1:1- 7.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The american sign language lexicon video dataset", |
|
"authors": [ |
|
{ |
|
"first": "Carol", |
|
"middle": [], |
|
"last": "Vassilis Athitsos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stan", |
|
"middle": [], |
|
"last": "Neidle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joan", |
|
"middle": [], |
|
"last": "Sclaroff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Nash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quan", |
|
"middle": [], |
|
"last": "Stefan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashwin", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Thangali", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vassilis Athitsos, Carol Neidle, Stan Sclaroff, Joan Nash, Alexandra Stefan, Quan Yuan, and Ashwin Thangali. 2008. The american sign language lexicon video dataset. 2008 IEEE Computer Society Confer- ence on Computer Vision and Pattern Recognition Workshops, pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Sign language recognition, generation, and translation: An interdisciplinary perspective", |
|
"authors": [ |
|
{ |
|
"first": "Danielle", |
|
"middle": [], |
|
"last": "Bragg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "Koller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [], |
|
"last": "Bellard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Larwan", |
|
"middle": [], |
|
"last": "Berke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Boudreault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Annelies", |
|
"middle": [], |
|
"last": "Braffort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naomi", |
|
"middle": [], |
|
"last": "Caselli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Huenerfauth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hernisa", |
|
"middle": [], |
|
"last": "Kacorri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tessa", |
|
"middle": [], |
|
"last": "Verhoef", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The 21st International ACM SIGACCESS Conference on Computers and Accessibility", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "16--31", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danielle Bragg, Oscar Koller, Mary Bellard, Lar- wan Berke, Patrick Boudreault, Annelies Braffort, Naomi Caselli, Matt Huenerfauth, Hernisa Kacorri, Tessa Verhoef, et al. 2019. Sign language recog- nition, generation, and translation: An interdisci- plinary perspective. In The 21st International ACM SIGACCESS Conference on Computers and Accessi- bility, pages 16-31. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Realtime multi-person 2d pose estimation using part affinity fields", |
|
"authors": [ |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Simon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shih-En", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Sheikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7291--7299", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhe Cao, Tomas Simon, Shih-En Wei, and Yaser Sheikh. 2017. Realtime multi-person 2d pose esti- mation using part affinity fields. In Proceedings of the IEEE Conference on Computer Vision and Pat- tern Recognition, pages 7291-7299.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Quo vadis, action recognition? a new model and the kinetics dataset", |
|
"authors": [ |
|
{ |
|
"first": "Joao", |
|
"middle": [], |
|
"last": "Carreira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6299--6308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joao Carreira and Andrew Zisserman. 2017. Quo vadis, action recognition? a new model and the kinetics dataset. In proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Kosmas Dimitropoulos, and Petros Daras. 2020. A comprehensive study on deep learning-based 3d hand pose estimation methods", |
|
"authors": [ |
|
{ |
|
"first": "Theocharis", |
|
"middle": [], |
|
"last": "Chatzis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stergioulas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitrios", |
|
"middle": [], |
|
"last": "Konstantinidis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Applied Sciences", |
|
"volume": "10", |
|
"issue": "19", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Theocharis Chatzis, Andreas Stergioulas, Dimitrios Konstantinidis, Kosmas Dimitropoulos, and Petros Daras. 2020. A comprehensive study on deep learning-based 3d hand pose estimation methods. Applied Sciences, 10(19):6850.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Neural sign language translation", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Necati Cihan Camgoz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "Hadfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Koller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bowden", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7784--7793", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Necati Cihan Camgoz, Simon Hadfield, Oscar Koller, Hermann Ney, and Richard Bowden. 2018. Neural sign language translation. pages 7784-7793.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Sign language recognition", |
|
"authors": [ |
|
{ |
|
"first": "Helen", |
|
"middle": [], |
|
"last": "Cooper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Holt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Bowden", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Visual Analysis of Humans", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "539--562", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helen Cooper, Brian Holt, and Richard Bowden. 2011. Sign language recognition. Visual Analysis of Hu- mans, pages 539-562.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Frequency distribution and spreading behavior of different types of mouth actions in three sign languages", |
|
"authors": [ |
|
{ |
|
"first": "Els", |
|
"middle": [], |
|
"last": "Onno A Crasborn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Van Der", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dafydd", |
|
"middle": [], |
|
"last": "Kooij", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bencie", |
|
"middle": [], |
|
"last": "Waters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johanna", |
|
"middle": [], |
|
"last": "Woll", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mesch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Sign Language & Linguistics", |
|
"volume": "11", |
|
"issue": "1", |
|
"pages": "45--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Onno A Crasborn, Els Van Der Kooij, Dafydd Wa- ters, Bencie Woll, and Johanna Mesch. 2008. Fre- quency distribution and spreading behavior of differ- ent types of mouth actions in three sign languages. Sign Language & Linguistics, 11(1):45-67.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "A Deep Neural Framework for Continuous Sign Language Recognition by Iterative Training", |
|
"authors": [ |
|
{ |
|
"first": "Runpeng", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hu", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changshui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "IEEE Transactions on Multimedia", |
|
"volume": "21", |
|
"issue": "7", |
|
"pages": "1880--1891", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Runpeng Cui, Hu Liu, and Changshui Zhang. 2019. A Deep Neural Framework for Continuous Sign Language Recognition by Iterative Training. IEEE Transactions on Multimedia, 21(7):1880-1891.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Mixed signals: Combining linguistic and affective functions of eyebrows in questions in sign language of the netherlands", |
|
"authors": [ |
|
{ |
|
"first": "Connie", |
|
"middle": [], |
|
"last": "De Vos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Els", |
|
"middle": [], |
|
"last": "Van Der Kooij", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Onno", |
|
"middle": [], |
|
"last": "Crasborn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Language and speech", |
|
"volume": "52", |
|
"issue": "2-3", |
|
"pages": "315--339", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Connie De Vos, Els Van der Kooij, and Onno Crasborn. 2009. Mixed signals: Combining linguistic and af- fective functions of eyebrows in questions in sign language of the netherlands. Language and speech, 52(2-3):315-339.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Gslc: creation and annotation of a greek sign language corpus for hci", |
|
"authors": [ |
|
{ |
|
"first": "Eleni", |
|
"middle": [], |
|
"last": "Efthimiou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stavroula-Evita", |
|
"middle": [], |
|
"last": "Fotinea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "International Conference on Universal Access in Human-Computer Interaction", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "657--666", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eleni Efthimiou and Stavroula-Evita Fotinea. 2007. Gslc: creation and annotation of a greek sign lan- guage corpus for hci. International Conference on Universal Access in Human-Computer Interaction, pages 657-666.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Grammatical facial expression recognition in sign language discourse: a study at the syntax level", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Fernando", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarajane", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Freitas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Peres", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Clodoaldo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felipe", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Lima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Barbosa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Information Systems Frontiers", |
|
"volume": "19", |
|
"issue": "6", |
|
"pages": "1243--1259", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fernando A Freitas, Sarajane M Peres, Clodoaldo AM Lima, and Felipe V Barbosa. 2017. Grammatical facial expression recognition in sign language dis- course: a study at the syntax level. Information Sys- tems Frontiers, 19(6):1243-1259.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Large-scale weakly-supervised pre-training for video action recognition", |
|
"authors": [ |
|
{ |
|
"first": "Deepti", |
|
"middle": [], |
|
"last": "Ghadiyaram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Du", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Mahajan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12046--12055", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepti Ghadiyaram, Du Tran, and Dhruv Mahajan. 2019. Large-scale weakly-supervised pre-training for video action recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 12046-12055.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Video-based sign language recognition without temporal segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wengang", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qilin", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houqiang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiping", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "32nd AAAI Conference on Artificial Intelligence, AAAI 2018", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2257--2264", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Huang, Wengang Zhou, Qilin Zhang, Houqiang Li, and Weiping Li. 2018. Video-based sign language recognition without temporal segmentation. In 32nd AAAI Conference on Artificial Intelligence, AAAI 2018, pages 2257-2264. AAAI press.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The kinetics human action video dataset", |
|
"authors": [ |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Kay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joao", |
|
"middle": [], |
|
"last": "Carreira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chloe", |
|
"middle": [], |
|
"last": "Hillier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sudheendra", |
|
"middle": [], |
|
"last": "Vijayanarasimhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Viola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Green", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Back", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Natsev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1705.06950" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijaya- narasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. 2017. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Medet Mukushev, and Anara Sandygulova. 2020. Eyebrow position in grammatical and emotional expressions in kazakh-russian sign language: A quantitative study", |
|
"authors": [ |
|
{ |

"first": "Vadim", |

"middle": [], |

"last": "Kimmelman", |

"suffix": "" |

}, |

{ |

"first": "Alfarabi", |

"middle": [], |

"last": "Imashev", |

"suffix": "" |

}, |

{ |

"first": "Medet", |

"middle": [], |

"last": "Mukushev", |

"suffix": "" |

}, |

{ |

"first": "Anara", |

"middle": [], |

"last": "Sandygulova", |

"suffix": "" |

} |
|
], |
|
"year": null, |
|
"venue": "PloS one", |
|
"volume": "15", |
|
"issue": "6", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vadim Kimmelman, Alfarabi Imashev, Medet Muku- shev, and Anara Sandygulova. 2020. Eyebrow po- sition in grammatical and emotional expressions in kazakh-russian sign language: A quantitative study. PloS one, 15(6):e0233731.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Neural sign language translation based on human keypoint estimation", |
|
"authors": [ |
|
{ |
|
"first": "Sang-Ki", |
|
"middle": [], |
|
"last": "Ko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chang", |
|
"middle": [ |
|
"Jo" |
|
], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyedong", |
|
"middle": [], |
|
"last": "Jung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Choongsang", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Applied Sciences", |
|
"volume": "9", |
|
"issue": "13", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sang-Ki Ko, Chang Jo Kim, Hyedong Jung, and Choongsang Cho. 2019. Neural sign language trans- lation based on human keypoint estimation. Applied Sciences, 9(13):2683.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Quantitative survey of the state of the art in sign language recognition", |
|
"authors": [ |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "Koller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2008.09918" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oscar Koller. 2020. Quantitative survey of the state of the art in sign language recognition. arXiv preprint arXiv:2008.09918.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Weakly Supervised Learning with Multi-Stream CNN-LSTM-HMMs to Discover Sequential Parallelism in Sign Language Videos", |
|
"authors": [ |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "Koller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cihan", |
|
"middle": [], |
|
"last": "Camgoz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Bowden", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--1", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oscar Koller, Cihan Camgoz, Hermann Ney, and Richard Bowden. 2019. Weakly Supervised Learn- ing with Multi-Stream CNN-LSTM-HMMs to Dis- cover Sequential Parallelism in Sign Language Videos. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Extraction of texture and geometrical features from informative facial regions for sign language recognition", |
|
"authors": [ |
|
{ |
|
"first": "Sunil", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manas", |
|
"middle": [], |
|
"last": "Kamal Bhuyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Biplab Ketan", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Journal on Multimodal User Interfaces", |
|
"volume": "11", |
|
"issue": "2", |
|
"pages": "227--239", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sunil Kumar, Manas Kamal Bhuyan, and Biplab Ketan Chakraborty. 2017. Extraction of texture and geo- metrical features from informative facial regions for sign language recognition. Journal on Multimodal User Interfaces, 11(2):227-239.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Non-manual grammatical marker recognition based on multi-scale, spatio-temporal analysis of head pose and facial expressions. Image and Vision Computing", |
|
"authors": [ |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaoting", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |

{ |

"first": "Dimitris", |

"middle": [ |

"N" |

], |

"last": "Metaxas", |

"suffix": "" |

}, |

{ |

"first": "Carol", |

"middle": [], |

"last": "Neidle", |

"suffix": "" |

} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "671--681", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingjing Liu, Bo Liu, Shaoting Zhang, Fei Yang, Peng Yang, Dimitris N Metaxas, and Carol Neidle. 2014. Non-manual grammatical marker recognition based on multi-scale, spatio-temporal analysis of head pose and facial expressions. Image and Vision Com- puting, 32(10):671-681.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Purdue rvl-slll asl database for automatic recognition of american sign language", |
|
"authors": [ |
|
{ |

"first": "Aleix", |

"middle": [ |

"M" |

], |

"last": "Mart\u00ednez", |

"suffix": "" |

}, |

{ |

"first": "Ronnie", |

"middle": [ |

"B" |

], |

"last": "Wilbur", |

"suffix": "" |

}, |

{ |

"first": "Robin", |

"middle": [], |

"last": "Shay", |

"suffix": "" |

}, |

{ |

"first": "Avinash", |

"middle": [ |

"C" |

], |

"last": "Kak", |

"suffix": "" |

} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings. Fourth IEEE International Conference on Multimodal Interfaces", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "167--172", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aleix M Mart\u00ednez, Ronnie B Wilbur, Robin Shay, and Avinash C Kak. 2002. Purdue rvl-slll asl database for automatic recognition of american sign language. Proceedings. Fourth IEEE International Conference on Multimodal Interfaces, pages 167-172.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Evaluation of manual and non-manual components for sign language recognition", |
|
"authors": [ |
|
{ |
|
"first": "Medet", |
|
"middle": [], |
|
"last": "Mukushev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Sabyrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alfarabi", |
|
"middle": [], |
|
"last": "Imashev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenessary", |
|
"middle": [], |
|
"last": "Koishybay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vadim", |
|
"middle": [], |
|
"last": "Kimmelman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anara", |
|
"middle": [], |
|
"last": "Sandygulova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6073--6078", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Medet Mukushev, Arman Sabyrov, Alfarabi Imashev, Kenessary Koishybay, Vadim Kimmelman, and Anara Sandygulova. 2020. Evaluation of man- ual and non-manual components for sign language recognition. In Proceedings of The 12th Language Resources and Evaluation Conference, pages 6073- 6078.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Pytorch: An imperative style, high-performance deep learning library", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Paszke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Massa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lerer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Chanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Killeen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeming", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Gimelshein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Antiga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alban", |
|
"middle": [], |
|
"last": "Desmaison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Kopf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Devito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Raison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alykhan", |
|
"middle": [], |
|
"last": "Tejani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sasank", |
|
"middle": [], |
|
"last": "Chilamkurthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benoit", |
|
"middle": [], |
|
"last": "Steiner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junjie", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soumith", |
|
"middle": [], |
|
"last": "Chintala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "8024--8035", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Te- jani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. Py- torch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. dAlch\u00e9-Buc, E. Fox, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 32, pages 8024-8035. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Scikit-learn: Machine learning in Python", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pedregosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Varoquaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gramfort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Thirion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Grisel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Dubourg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vanderplas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Cournapeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Brucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Perrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Duchesnay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2825--2830", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Nonmanuals: Their prosodic and grammatical roles. Sign languages", |
|
"authors": [ |
|
{ |
|
"first": "Roland", |
|
"middle": [], |
|
"last": "Pfau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josep", |
|
"middle": [], |
|
"last": "Quer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "381--402", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roland Pfau and Josep Quer. 2010. Nonmanuals: Their prosodic and grammatical roles. Sign languages, pages 381-402.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Ima-geNet Large Scale Visual Recognition Challenge", |
|
"authors": [ |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Russakovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jia", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Krause", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Satheesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sean", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiheng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrej", |
|
"middle": [], |
|
"last": "Karpathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Khosla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Bernstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Berg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Journal of Computer Vision (IJCV)", |
|
"volume": "115", |
|
"issue": "3", |
|
"pages": "211--252", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s11263-015-0816-y" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, An- drej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. 2015. Ima- geNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision (IJCV), 115(3):211-252.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Sign language and linguistic universals", |
|
"authors": [ |
|
{ |
|
"first": "Wendy", |
|
"middle": [], |
|
"last": "Sandler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diane", |
|
"middle": [], |
|
"last": "Lillo-Martin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wendy Sandler and Diane Lillo-Martin. 2006. Sign language and linguistic universals. Cambridge Uni- versity Press.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Hand keypoint detection in single images using multiview bootstrapping", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Simon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanbyul", |
|
"middle": [], |
|
"last": "Joo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iain", |
|
"middle": [], |
|
"last": "Matthews", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Sheikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1145--1153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Simon, Hanbyul Joo, Iain Matthews, and Yaser Sheikh. 2017. Hand keypoint detection in single im- ages using multiview bootstrapping. In Proceedings of the IEEE conference on Computer Vision and Pat- tern Recognition, pages 1145-1153.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "A closer look at spatiotemporal convolutions for action recognition", |
|
"authors": [ |
|
{ |
|
"first": "Du", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lorenzo", |
|
"middle": [], |
|
"last": "Torresani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Ray", |
|
"suffix": "" |
|
}, |

{ |

"first": "Yann", |

"middle": [], |

"last": "LeCun", |

"suffix": "" |

}, |

{ |

"first": "Manohar", |

"middle": [], |

"last": "Paluri", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the IEEE conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6450--6459", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. 2018. A closer look at spatiotemporal convolutions for action recog- nition. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 6450-6459.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "S-pota benchmark in spotting signs within continuous signing", |
|
"authors": [ |
|
{ |
|
"first": "Ville", |
|
"middle": [], |
|
"last": "Viitaniemi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jantunen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leena", |
|
"middle": [], |
|
"last": "Savolainen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matti", |
|
"middle": [], |
|
"last": "Karppa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jorma", |
|
"middle": [], |
|
"last": "Laaksonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "European Language Resources Association (LREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ville Viitaniemi, Tommi Jantunen, Leena Savolainen, Matti Karppa, and Jorma Laaksonen. 2014. S-pot- a benchmark in spotting signs within continuous signing. European Language Resources Association (LREC).", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "The significance of facial features for automatic sign language recognition", |
|
"authors": [ |
|
{ |

"first": "Ulrich", |

"middle": [], |

"last": "Von Agris", |

"suffix": "" |

}, |

{ |

"first": "Moritz", |

"middle": [], |

"last": "Knorr", |

"suffix": "" |

}, |

{ |

"first": "Karl-Friedrich", |

"middle": [], |

"last": "Kraiss", |

"suffix": "" |

} |
|
], |
|
"year": 2008, |
|
"venue": "8th IEEE International Conference on Automatic Face & Gesture Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ulrich Von Agris, Moritz Knorr, and Karl-Friedrich Kraiss. 2008. The significance of facial features for automatic sign language recognition. 8th IEEE In- ternational Conference on Automatic Face & Ges- ture Recognition, pages 1-6.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Convolutional pose machines", |
|
"authors": [ |
|
{ |

"first": "Shih-En", |

"middle": [], |

"last": "Wei", |

"suffix": "" |

}, |

{ |

"first": "Varun", |

"middle": [], |

"last": "Ramakrishna", |

"suffix": "" |

}, |

{ |

"first": "Takeo", |

"middle": [], |

"last": "Kanade", |

"suffix": "" |

}, |

{ |

"first": "Yaser", |

"middle": [], |

"last": "Sheikh", |

"suffix": "" |

} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4724--4732", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shih-En Wei, Varun Ramakrishna, Takeo Kanade, and Yaser Sheikh. 2016. Convolutional pose machines. In Proceedings of the IEEE Conference on Com- puter Vision and Pattern Recognition, pages 4724- 4732.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Robust sign language recognition by combining manual and non-manual features based on conditional random field and support vector machine", |
|
"authors": [ |
|
{ |
|
"first": "Hee-Deok", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seong-Whan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Pattern Recognition Letters", |
|
"volume": "34", |
|
"issue": "16", |
|
"pages": "2051--2056", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hee-Deok Yang and Seong-Whan Lee. 2013. Robust sign language recognition by combining manual and non-manual features based on conditional random field and support vector machine. Pattern Recogni- tion Letters, 34(16):2051-2056.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Hand, head and face-negative constructions in sign languages", |
|
"authors": [ |
|
{ |
|
"first": "Ulrike", |
|
"middle": [], |
|
"last": "Zeshan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Linguistic Typology", |
|
"volume": "8", |
|
"issue": "1", |
|
"pages": "1--58", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ulrike Zeshan. 2004a. Hand, head and face-negative constructions in sign languages. Linguistic Typol- ogy, 8(1):1-58.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Interrogative constructions in signed languages: Crosslinguistic perspectives. Language", |
|
"authors": [ |
|
{ |
|
"first": "Ulrike", |
|
"middle": [], |
|
"last": "Zeshan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--39", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ulrike Zeshan. 2004b. Interrogative constructions in signed languages: Crosslinguistic perspectives. Lan- guage, pages 7-39.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Continuous Sign Language Recognition via Reinforcement Learning", |
|
"authors": [ |
|
{ |
|
"first": "Zhihao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junfu", |
|
"middle": [], |
|
"last": "Pu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liansheng", |
|
"middle": [], |
|
"last": "Zhuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wengang", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houqiang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "285--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhihao Zhang, Junfu Pu, Liansheng Zhuang, Wengang Zhou, and Houqiang Li. 2019. Continuous Sign Language Recognition via Reinforcement Learning. pages 285-289. Institute of Electrical and Electron- ics Engineers (IEEE).", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Dynamic Pseudo Label Decoding for Continuous Sign Language Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wengang", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houqiang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1282--1287", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hao Zhou, Wengang Zhou, and Houqiang Li. 2019. Dynamic Pseudo Label Decoding for Continuous Sign Language Recognition. pages 1282-1287. Institute of Electrical and Electronics Engineers (IEEE).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "Examples of facial expressions in neutral, surprised and angry state of mind: A) neutral statements, B) neutral question, C) surprised statement, D) surprised question, E) angry statement, E) angry question.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Figure 4: Examples of three phonological minimal pairs: A) \"tea\", B) \"Thursday\", C) \"orange\", D) \"October\", E) \"Moscow\" F) \"old\".", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Datasets used for sign language recognition", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Kazakh-Russian Sign Language dataset", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"content": "<table><tr><td/><td colspan=\"3\">presents the accuracy scores</td></tr><tr><td colspan=\"3\">for each combination of features.</td><td/></tr><tr><td/><td colspan=\"3\">R(2+1)D Logistic regression</td></tr><tr><td colspan=\"4\">Features Full frame Manual Non-manual</td></tr><tr><td>Mean</td><td>86%</td><td>73.4%</td><td>77%</td></tr><tr><td>Std Dev</td><td>1</td><td>0.45</td><td>0.57</td></tr></table>", |
|
"text": "", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Comparison of results of features combinations", |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |