|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:20:16.997890Z" |
|
}, |
|
"title": "A Survey on Paralinguistics in Tamil Speech Processing", |
|
"authors": [ |
|
{ |
|
"first": "Anosha", |
|
"middle": [], |
|
"last": "Ignatius", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Moratuwa Sri Lanka", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Uthayasanker", |
|
"middle": [], |
|
"last": "Thayasivam", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Moratuwa Sri Lanka", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Speech carries not only the semantic content but also the paralinguistic information which captures the speaking style. Speaker traits and emotional states affect how words are being spoken. The research on paralinguistic information is an emerging field in speech and language processing and it has many potential applications including speech recognition, speaker identification and verification, emotion recognition and accent recognition. Among them, there is a significant interest in emotion recognition from speech. A detailed study of paralinguistic information present in speech signal and an overview of research work related to speech emotion for Tamil Language is presented in this paper.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Speech carries not only the semantic content but also the paralinguistic information which captures the speaking style. Speaker traits and emotional states affect how words are being spoken. The research on paralinguistic information is an emerging field in speech and language processing and it has many potential applications including speech recognition, speaker identification and verification, emotion recognition and accent recognition. Among them, there is a significant interest in emotion recognition from speech. A detailed study of paralinguistic information present in speech signal and an overview of research work related to speech emotion for Tamil Language is presented in this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The field of Paralinguistics deals with how something is being spoken and is limited to non-verbal aspects of speech. It covers the manner of speaking and information characterizing speaker traits and states. Paralinguistic content present in the speech signal provides information which is mainly grouped into short-term traits and long-term traits. Short-term traits include speaking style, voice quality, and emotional states while long-term traits include biological trait primitives such as height, weight, age and gender, ethnicity, culture, and personality. Health state, intoxication, mood, and sleepiness are categorized as medium term traits between short term and permanent traits. (Schuller and Batliner, 2014) The research on paralinguistics is beneficial in a wide range of speech processing applications. Performance of automatic speech recognition (ASR) is affected by variability in the speaking style due to speaker traits emotions. Analyzing the paralinguistic information in the speech help to compensate for the speaker variability through normalization techniques (Anosha and Uthayasanker, 2020) . In addition to this, several paralinguistic tasks including speaker recognition, accent recognition, and emotion recognition have been investigated over the past years. Interest in paralinguistics has significantly grown in the past years and Interspeech conference hosts a computational paralinguistics challenge ComParE since 2009 with different sets of tasks each year. The most popular paralinguistic task is speech emotion recognition. Emotion detection can help in making the human computer interface adapt to user's emotional condition, thus improving the user satisfaction. Study of paralinguistics can also be applied in diagnosing and monitoring the disease progression in diseases like neurodegenerative disorders which show speech impairment as one of the early signs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 693, |
|
"end": 722, |
|
"text": "(Schuller and Batliner, 2014)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1086, |
|
"end": 1117, |
|
"text": "(Anosha and Uthayasanker, 2020)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Selecting appropriate feature extraction techniques is important for discriminating between classes in paralinguistic tasks. Then most common features that capture paralinguistic information are low-level descriptors (LLD) (Schuller et al., 2013) which include mel frequency cepstral coefficients (MFCC), energy, pitch frequency, loudness, zero crossing rate, harmonicity, jitter, shimmer etc. Recent research works use deep neural networks (DNNs) to learn high-level acoustic features from utterance-level LLD or directly from raw speech signal. DNNs have shown significant performance in a variety of applications ranging from speech recognition to speaker identification and verification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 223, |
|
"end": 246, |
|
"text": "(Schuller et al., 2013)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper presents the background of paralinguistics and focuses on the research work related to speech based emotion recognition for Tamil language. Tamil is a Dravidian language natively spoken by South Asia's Tamil people. Tamil is the official language of two sovereign states, Singapore and Sri Lanka, as well as the Indian province of Tamil Nadu and Puducherry (Chakravarthi et al., 2018 (Chakravarthi et al., , 2019 Chakravarthi, 2020) . With a history stretching back to 600 BCE, the Tamil language is one of the world's longest-surviving classical languages. Poetry dominates Tamil literature, especially Sangam literature, which consists of poems written between 600 BCE and 300 CE. The Tamil language accounts for more than 55 percent of the epigraphical inscriptions discovered by the Archaeological Survey of India (approximately 55,000) (Caldwell, 1875) . The rest of the paper is organized as follows: Section II describes in detail about paralinguistic information in speech and explores various acoustic features extracted from the speech signal. Section III discusses speech emotion recognition and presents a survey of research work done for Tamil language. The paper is concluded in Section IV, with discussions in this research area.", |
|
"cite_spans": [ |
|
{ |
|
"start": 368, |
|
"end": 394, |
|
"text": "(Chakravarthi et al., 2018", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 395, |
|
"end": 423, |
|
"text": "(Chakravarthi et al., , 2019", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 443, |
|
"text": "Chakravarthi, 2020)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 852, |
|
"end": 868, |
|
"text": "(Caldwell, 1875)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Speech signal carries two types of information: linguistic information which conveys the spoken content and paralinguistic information that covers the speaker attributes. Paralinguistic information can be categorized into three types as shown in Figure 1 . The following sections discuss about paralinguistic features, its applications and acoustic features that represent them. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 255, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Mainly paralinguistics deal with age, gender, personality, emotion, deviant speech, and discrepant communication. Biological trait primitives such as age and gender are designed by nature. Male and female voices differ due to physiological differences. In addition to this, age and cultural factors contribute to variations in speech. Cultural background determines the native language and regional dialect. Both the biological trait primitives and cultural trait primitives heavily influence the speech.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paralinguistic information in speech", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Deviant Speech is normally a long-term condition caused by disorders such as autism spectrum disorder (ASD), Alzheimer's disease, Parkinson's disease and motor neuron disease. Medium term states such as sleepiness and intoxication by alcohol consumption can also affect the normal speech. However, it returns to normal state quite soon. In the case of discrepant speech, speaker intentionally chooses to use deviant speech, deceptive speech, irony, or sarcasm. Detection of deceptive speech could be beneficial in therapeutic scenarios and forensic scenarios.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paralinguistic information in speech", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In speaker recognition, combined traits of a speaker are considered. Speaker recognition assigns a given speech segment to a particular speaker. Thus, speech features are used to find biological traits, thereby identifying the speaker. The typical application of speaker characteristics in forensics is speaker identification. The other field of speaker recognition is speaker verification, used in access control systems. The system has to verify that the given speech segment belongs to a speaker within a group of people who are allowed access.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paralinguistic information in speech", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The current emotional state of the speaker influences the tone, volume, and speech rate. Each emotion can be characterized by a unique acoustic pattern. Several studies on speech emotion recognition have been carried out over the time. Prosodic features such as pitch, duration, and quality are found to be important in emotion recognition. The way emotions are expressed depends on the personality of the speaker. Also, some aspects of emotions vary with language and culture. Therefore, it is challenging to develop emotion recognition systems across cultures.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paralinguistic information in speech", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Speech is a time varying signal and it is analyzed frame by frame. The most common features representing the speech signal are referred to as LLDs. Typical LLDs cover intensity, intonation, linear prediction cepstral coefficients (LPCCs), perceptual linear prediction (PLP), mel frequency cepstral coefficients (MFCCs), gammatone frequecny cepstral coefficients (GFCCs) formants, harmonicity, vocal cord perturbation etc. Breakdown of these features into three types namely prosodic, spectral, and voice quality features is shown in Table 1 . These features are generally augmented by other descriptors computed from the raw LLDs such as delta coefficients or regression coefficients. MFCCs are the most widely used feature extraction technique in speech processing applications. MFCCs are computed as follows. Windowed speech signal transformed to frequency domain using discrete Fourier transform, mel filter bank is applied to the magnitude transform and discrete cosine transform is computed on the logs of powers at each mel frequencies.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 533, |
|
"end": 540, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Acoustic features representing paralinguistic information", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Formants are resonant frequencies of the vocal tract. They vary according to the spoken content. In particular, the lower resonance frequencies of the vocal tract, that is, the first two formants are well correlated with the phonetic content while the higher formants describe speaker characteristics. Formants are mostly computed from LPCs. Fundamental frequency and formant frequencies are most important speech parameters and its detection has a significant influence in recognizing emotion from speech (Belean, 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 506, |
|
"end": 520, |
|
"text": "(Belean, 2013)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic features representing paralinguistic information", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Vocal cord perturbation measures such as jitter and shimmer describe the quality of the voice. Jitter is the fluctuation in the length of the fundamental period from one cycle to the next and shimmer is the variation of the waveform amplitude from one cycle to the next. As they describe the pathological characteristics of voice, jitter, and shimmer measures are helpful in determining speaker age or voice pathology (Teixeira and Gon\u00e7alves, 2016).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic features representing paralinguistic information", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Derived features can be computed from the LLDs and they could be combinations of the abovementioned features. The most popular ones that capture the temporal information are the first and second order derivatives referred to as delta, deltadelta coefficients respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic features representing paralinguistic information", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Emotion recognition from speech is one of the major topics in the field of computational paralinguistics. It helps to understand the emotional condition and actual intentions of the speakers which would be beneficial in improving the speech based applications such as automatic speech recognition (Jose et al., 2012; Madhavaraj and Ramakrishnan, 2017; Lokesh et al., 2019) and spoken intent recognition (Yohan et al., 2019b,a) . It can be used to enhance the human computer interaction and to identify the emotional state of the user in call centers. Several research studies have worked on developing speech emotion recognition systems using DNNs. Emo- (Ardila et al., 2020) , Open SLR -Tamil (He et al., 2020) , Microsoft Speech Corpus (Indian languages) -Tamil (mic), and Tamil Speech Intent Dataset -UoM (Buddhika et al., 2018) . However, there is no standard database available for Tamil emotional speech. Emotional speech corpora can consist of three types of emotional speech: acted emotions which is simulated by actors, spontaneous emotions from real life situations, and elicited emotions stimulated through emotional movies, stories and games. It is desired to have good quality speech recordings of diverse set of emotions collected from a large group of speakers. Though spontaneous emotions are preferred, it is difficult to acquire. Therefore, most of the available speech corpora contain acted emotional speech.", |
|
"cite_spans": [ |
|
{ |
|
"start": 297, |
|
"end": 316, |
|
"text": "(Jose et al., 2012;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 351, |
|
"text": "Madhavaraj and Ramakrishnan, 2017;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 372, |
|
"text": "Lokesh et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 403, |
|
"end": 426, |
|
"text": "(Yohan et al., 2019b,a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 654, |
|
"end": 675, |
|
"text": "(Ardila et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 694, |
|
"end": 711, |
|
"text": "(He et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 808, |
|
"end": 831, |
|
"text": "(Buddhika et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Speech Emotion Recognition for Tamil Language", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Joe (2014) built a Tamil emotional speech corpus with speech data consisting of five emotions Happy, Sad, Anger, Fear, and Neutral. Speech recordings were collected from acted emotional speech in Tamil audio plays. Support vector machine (SVM)-based emotion recognition system was used to perform classification with MFCCs as input features. This corpus was extended in (Vasuki et al., 2020) and another emotion corpus was developed separately for children using the samples collected from Tamil movies. Having an emotion corpus consisting of utterances of both the adults and children could help in investigating the influence of age in emotion expression.", |
|
"cite_spans": [ |
|
{ |
|
"start": 370, |
|
"end": 391, |
|
"text": "(Vasuki et al., 2020)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Speech Emotion Recognition for Tamil Language", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In (Renjith and Manju, 2017) , an acoustic feature-based emotion recognition system is presented. The database used in this work is Amritaemo (Poorna et al., 2015) which consists of speech recordings in Tamil and Telugu languages with three emotions: anger, happiness, and sadness. K-Nearest Neighbor (KNN) and Artificial Neural Network (ANN) were used to classify emotions based on two features namely Linear Predictive Cepstral Coefficients (LPCCs) and Hurst Parameter. Hurst parameter is the decaying rate of the autocorrelation coefficient function of the speech signal. The experimental results showed that when individual features were considered, for both the languages Hurst parameter achieved higher accuracy, precision, and recall when compared to LPCCs while the combination of these features resulted in a better performance. Murali et al. (2018) proposed a Gaussian Mixture Model (GMM) -Deep Belief Network (DBN) system for emotion recognition. It models GMM for each emotion independently using MFCC features extracted from the speech signal. The minimum distance between the distribution of features for each utterance with respect to each emotion model is derived as Bag of acoustic features and it is fed to DBN as the feature vector. The proposed system was evaluated on Berlin emotional speech corpus (Burkhardt et al., 2005) as well the emotional database for Tamil language. The database used in this work consists of four emotions namely anger, happy, sad, and neutral state for three languages Tamil, English, and Malayalam.", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 28, |
|
"text": "(Renjith and Manju, 2017)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 163, |
|
"text": "(Poorna et al., 2015)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 838, |
|
"end": 858, |
|
"text": "Murali et al. (2018)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1320, |
|
"end": 1344, |
|
"text": "(Burkhardt et al., 2005)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Speech Emotion Recognition for Tamil Language", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Ram and Ponnusamy (2018) developed a speech corpus to evaluate the Tamil speech emotion recognition of children with autism spectrum disorder. In addition, emotional speech recordings for Tamil and Telugu language were collected from movies and Berlin emotional speech corpus was used as well for training. The selected emotion classes were anger, neutral, happiness, sadness, and fear. MFCC, pitch frequency, and energy were used as the set of features fed to the SVM based classifier. An emotion recognition system using weighted features is proposed in (Poorna et al., 2018) . A speech dataset consisting of audio recordings expressing five emotions namely anger, surprise, disgust, sadness, and happiness was created for three South Indian languages Tamil, Telugu, and Malayalam. Mean and variance of energy contour, first five formant frequencies, and Linear predictive coding (LPC) coefficients were chosen as the speech features and classification was performed using KNN, SVM, and ANN. Classification performance using normal features and weighted features were compared and a significant increase in recognition accuracy was observed with weighted features. ANN achieved the best accuracy among the considered classifier models. The results indicated that the weight factors are language dependent for the input features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 556, |
|
"end": 577, |
|
"text": "(Poorna et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Speech Emotion Recognition for Tamil Language", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The paper (Rajan et al., 2019) presents a multilingual emotional speech corpus TaMaR-EmoDB developed for Tamil, Malayalam, and Ravula. The corpus contains short speech utterances in five emotions: anger, anxiety, happiness, sadness, and neutral state. The corpus was built using simulated speech utterances where the subjects were asked to read a sentence, expressing a given emotion. It was evaluated using a DNN-based classifier and the classification was performed using the fusion of MFCCs and prosodic features such as short-time energy, zero crossing rate, and standard deviation, skewness, and kurtosis of pitch. Sowmya and Rajeswari (2020) created a speech dataset for Tamil language and built a classifier to predict emotions with MFCC and energy as the input features. The database consists of four emotions anger, sad, neutral, and happiness. KNN, SVM, and ANN were used for emotion classification where SVM and ANN achieved higher accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Speech Emotion Recognition for Tamil Language", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The presented study discussed about the paralinguistic information present in speech signal and its relevance in many speech processing applications. The Low-level descriptors characterize the underlying paralinguistic content and they can be used as input features to DNN based models. High level speech representations can be extracted directly from raw speech signal as well using DNNs. These models have shown significant performance in many paralinguistic tasks. Paralinguistic information can also be applied in improving speech recognition systems using normalization techniques. The most popular paralinguistic task, emotion recognition is challenging when applied across different languages and cultures since aspects of emotion expression are language and culture specific.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Therefore, a proper emotional speech database is required to build effective emotion detection models for Tamil language. Several research studies have created speech databases for Tamil language and evaluated them using SVM or DNN based classifiers which reported good performance. However, further improvements are still needed and a standard Tamil emotional speech database needs to be built with a diverse set of emotions and a large number of speakers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was supported by Accelerating Higher Education Expansion and Development (AHEAD) Operation of the Ministry of Education, Sri Lanka funded by the World Bank.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgment", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Low resource speech recognition challenge for Indian Languages", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Interspeech", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Interspeech (2018) Low resource speech recognition challenge for Indian Languages.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Tamil Internet Conference", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Anosha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Uthayasanker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "I. Anosha and T. Uthayasanker. 2020. In Tamil Internet Conference.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Common voice: A massively-multilingual speech corpus", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ardila", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Branson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Davis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Henretty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kohler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Meyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Morais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Saunders", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Tyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4211--4215", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Ardila, M. Branson, K.Davis, M. Henretty, M. Kohler, J. Meyer, R. Morais, L. Saunders, F.M. Tyers, and G. Weber. 2020. Common voice: A massively-multilingual speech corpus. In Proceed- ings of the 12th Conference on Language Resources and Evaluation (LREC 2020), pages 4211-4215.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Comparison of formant detection methods used in speech processing applications", |
|
"authors": [ |
|
{ |
|
"first": "Bogdan", |
|
"middle": [], |
|
"last": "Belean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "AIP Conference Proceedings", |
|
"volume": "1565", |
|
"issue": "", |
|
"pages": "85--89", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1063/1.4833702" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bogdan Belean. 2013. Comparison of formant detec- tion methods used in speech processing applications. AIP Conference Proceedings, 1565:85-89.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Voicer: A crowd sourcing tool for speech data collection", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Buddhika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Liyadipita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Nadeeshan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Witharana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Jayasena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "U", |
|
"middle": [], |
|
"last": "Thayasivam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 18th International Conference on Advances in ICT for Emerging Regions (ICTer)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "174--181", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICTER.2018.8615521" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Buddhika, R. Liyadipita, S. Nadeeshan, H. Witha- rana, S. Jayasena, and U. Thayasivam. 2018. Voicer: A crowd sourcing tool for speech data collection. In 2018 18th International Conference on Advances in ICT for Emerging Regions (ICTer), pages 174-181.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A database of german emotional speech", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Burkhardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Paeschke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Rolfes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Sendlmeier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "1517--1520", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Burkhardt, A. Paeschke, M. Rolfes, W. Sendlmeier, and B. Weiss. 2005. A database of german emo- tional speech. volume 5, pages 1517-1520.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "1875. A comparative grammar of the Dravidian or South-Indian family of languages", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Caldwell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Caldwell. 1875. A comparative grammar of the Dravidian or South-Indian family of languages. Tr\u00fcbner.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Leveraging orthographic information to improve machine translation of under-resourced languages", |
|
"authors": [ |
|
{ |
|
"first": "Chakravarthi", |
|
"middle": [], |
|
"last": "Bharathi Raja", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi. 2020. Leveraging ortho- graphic information to improve machine translation of under-resourced languages. Ph.D. thesis, NUI Galway.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Improving wordnets for underresourced languages using machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Arcan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mccrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 9th Global Wordnet Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "77--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Mihael Arcan, and John P. McCrae. 2018. Improving wordnets for under- resourced languages using machine translation. In Proceedings of the 9th Global Wordnet Conference, pages 77-86, Nanyang Technological University (NTU), Singapore. Global Wordnet Association.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "WordNet gloss translation for underresourced languages using multilingual neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Bharathi Raja", |
|
"middle": [], |
|
"last": "Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Arcan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "McCrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Second Workshop on Multilingualism at the Intersection of Knowledge Bases and Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Mihael Arcan, and John P. McCrae. 2019. WordNet gloss translation for under- resourced languages using multilingual neural ma- chine translation. In Proceedings of the Second Workshop on Multilingualism at the Intersection of Knowledge Bases and Machine Translation, pages 1-7, Dublin, Ireland. European Association for Ma- chine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Open-source Multi-speaker Speech Corpora for Building Gujarati, Kannada, Malayalam, Marathi, Tamil and Telugu Speech Synthesis Systems", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shan-Hui Cathy", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oddur", |
|
"middle": [], |
|
"last": "Kjartansson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Rivera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Katanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Gutkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isin", |
|
"middle": [], |
|
"last": "Demirsahin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cibu", |
|
"middle": [], |
|
"last": "Johny", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Jansche", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Supheakmungkol", |
|
"middle": [], |
|
"last": "Sarin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Knot", |
|
"middle": [], |
|
"last": "Pipatsrisawat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference (LREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6494--6503", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei He, Shan-Hui Cathy Chu, Oddur Kjartansson, Clara Rivera, Anna Katanova, Alexander Gutkin, Isin Demirsahin, Cibu Johny, Martin Jansche, Supheakmungkol Sarin, and Knot Pipatsrisawat. 2020. Open-source Multi-speaker Speech Cor- pora for Building Gujarati, Kannada, Malayalam, Marathi, Tamil and Telugu Speech Synthesis Sys- tems. In Proceedings of The 12th Language Re- sources and Evaluation Conference (LREC), pages 6494-6503, Marseille, France. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Developing tamil emotional speech corpus and evaluating using svm", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Joe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Conference on Science Engineering and Management Research (ICSEMR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICSEMR.2014.7043627" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. V. Joe. 2014. Developing tamil emotional speech corpus and evaluating using svm. In 2014 Interna- tional Conference on Science Engineering and Man- agement Research (ICSEMR), pages 1-6.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Initial experiments with tamil lvcsr", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"Melvin" |
|
], |
|
"last": "Jose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Schultz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/IALP.2012.46" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Melvin Jose, N. T. Vu, and T. Schultz. 2012. Initial experiments with tamil lvcsr.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "An automatic tamil speech recognition system by using bidirectional recurrent neural network with selforganizing map", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Lokesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Priyan", |
|
"middle": [ |
|
"Malarvizhi" |
|
], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"Ramya" |
|
], |
|
"last": "Devi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Parthasarathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Gokulnath", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Neural Computing and Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s00521-018-3466-5" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Lokesh, Priyan Malarvizhi Kumar, M. Ramya Devi, P. Parthasarathy, and C. Gokulnath. 2019. An automatic tamil speech recognition system by us- ing bidirectional recurrent neural network with self- organizing map. Neural Computing and Applica- tions, 31.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Design and development of a large vocabulary, continuous speech recognition system for tamil", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Madhavaraj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Ramakrishnan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "14th IEEE India Council International Conference (INDI-CON)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--5", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/INDICON.2017.8488025" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Madhavaraj and A.G. Ramakrishnan. 2017. Design and development of a large vocabulary, continuous speech recognition system for tamil. In 2017 14th IEEE India Council International Conference (INDI- CON), pages 1-5.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Tamil speech emotion recognition using deep belief network(dbn)", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Murali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pravena", |
|
"middle": [], |
|
"last": "Srikanth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Govind", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Signal Processing and Intelligent Recognition Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "328--336", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-319-67934-1_29" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Murali, Srikanth, Pravena, and Govind. 2018. Tamil speech emotion recognition using deep belief net- work(dbn). In Advances in Signal Processing and Intelligent Recognition Systems, pages 328-336.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A Weight Based Approach for Emotion Recognition from Speech: An Analysis Using South Indian Languages", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Poorna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Anuraj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Nair", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Second International Conference, ICSCS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "14--24", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-981-13-1936-5_2" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S.S. Poorna, K. Anuraj, and G. Nair. 2018. A Weight Based Approach for Emotion Recognition from Speech: An Analysis Using South Indian Languages. In Second International Conference, ICSCS, pages 14-24.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Emotion recognition using multiparameter speech feature classification", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Poorna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Jeevitha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Nair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Santhosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Nair", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "2015 International Conference on Computers, Communications, and Systems (ICCCS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "217--222", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CCOMS.2015.7562904" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S.S. Poorna, C. Jeevitha, S. Nair, S. Santhosh, and G.J. Nair. 2015. Emotion recognition using multi- parameter speech feature classification. In 2015 In- ternational Conference on Computers, Communica- tions, and Systems (ICCCS), pages 217-222.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Design and Development of a Multi-Lingual Speech Corpora (TaMaR-EmoDB) for Emotion", |
|
"authors": [ |
|
{ |
|
"first": "Rajeev", |
|
"middle": [], |
|
"last": "Rajan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "U", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Haritha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Sujitha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rejisha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Analysis. In INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3267--3271", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/Interspeech.2019-2034" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rajeev Rajan, U.G. Haritha, A.C. Sujitha, and T.M. Re- jisha. 2019. Design and Development of a Multi- Lingual Speech Corpora (TaMaR-EmoDB) for Emo- tion Analysis. In INTERSPEECH, pages 3267- 3271.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Toward design and enhancement of emotion recognition system through speech signals of autism spectrum disorder children for tamil language using multi-support vector machine", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Sunitha Ram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ponnusamy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of International Conference on Computational Intelligence and Data Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "145--158", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-981-10-6319-0_13" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Sunitha Ram and R. Ponnusamy. 2018. Toward de- sign and enhancement of emotion recognition sys- tem through speech signals of autism spectrum disor- der children for tamil language using multi-support vector machine. In Proceedings of International Conference on Computational Intelligence and Data Engineering, pages 145-158.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Speech based emotion recognition in tamil and telugu using lpcc and hurst parameters -a comparitive study using knn and ann classifiers", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Renjith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Manju", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 International Conference on Circuit ,Power and Computing Technologies (ICCPCT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICCPCT.2017.8074220" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Renjith and K.G. Manju. 2017. Speech based emo- tion recognition in tamil and telugu using lpcc and hurst parameters -a comparitive study using knn and ann classifiers. In 2017 International Confer- ence on Circuit ,Power and Computing Technologies (ICCPCT), pages 1-6.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Paralinguistics in speech and language -state-of-the-art and the challenge", |
|
"authors": [ |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Steidl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [], |
|
"last": "Batliner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Burkhardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurence", |
|
"middle": [], |
|
"last": "Devillers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shrikanth", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Special Issue on Paralinguistics in Naturalistic Speech and Language", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.csl.2012.02.005" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bj\u00f6rn Schuller, Stefan Steidl, Anton Batliner, Felix Burkhardt, Laurence Devillers, Christian M\u00fcller, and Shrikanth Narayan. 2013. Paralinguistics in speech and language -state-of-the-art and the chal- lenge. Computer Speech and Language, Special Is- sue on Paralinguistics in Naturalistic Speech and Language.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Computational Paralinguistics: Emotion, Affect and Personality in Speech and Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Schuller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Batliner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bj\u00f6rn W. Schuller and Anton M. Batliner. 2014. Com- putational Paralinguistics: Emotion, Affect and Per- sonality in Speech and Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Speech emotion recognition for tamil language speakers", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Sowmya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Rajeswari", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Advances in Intelligent Systems and Computing, Machine Intelligence and Signal Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "125--136", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-981-15-1366-4_10" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. Sowmya and A. Rajeswari. 2020. Speech emo- tion recognition for tamil language speakers. In Ad- vances in Intelligent Systems and Computing, Ma- chine Intelligence and Signal Processing. MISP, pages 125-136.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Algorithm for jitter and shimmer measurement in pathologic voices", |
|
"authors": [ |
|
{ |
|
"first": "Jo\u00e3o Paulo", |
|
"middle": [], |
|
"last": "Teixeira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Gon\u00e7alves", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Procedia Computer Science", |
|
"volume": "100", |
|
"issue": "", |
|
"pages": "271--279", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.procs.2016.09.155" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jo\u00e3o Paulo Teixeira and Andr\u00e9 Gon\u00e7alves. 2016. Algo- rithm for jitter and shimmer measurement in patho- logic voices. Procedia Computer Science, 100:271 - 279.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Construction and evaluation of tamil speech emotion corpus", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Vasuki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Sambavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vijesh", |
|
"middle": [], |
|
"last": "Joe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "National Academy Science Letters", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s40009-020-00907-1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Vasuki, B. Sambavi, and Vijesh Joe. 2020. Construc- tion and evaluation of tamil speech emotion corpus. National Academy Science Letters, 43.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Sinhala and tamil speech intent identification from english phoneme based asr", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Yohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Uthayasanker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Surangika", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Asian Language Processing (IALP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "234--239", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Yohan, T. Uthayasanker, and R. Surangika. 2019a. Sinhala and tamil speech intent identification from english phoneme based asr. 2019 International Conference on Asian Language Processing (IALP), pages 234-239.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Transfer learning based free-form speech command classification for low-resource languages", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Yohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Uthayasanker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Surangika", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "288--294", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-2040" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Yohan, T. Uthayasanker, and R. Surangika. 2019b. Transfer learning based free-form speech command classification for low-resource languages. In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics: Student Re- search Workshop, pages 288-294, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Types of paralinguistic information" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table><tr><td>: Acoustic features</td></tr><tr><td>tion expression varies across cultures and every</td></tr><tr><td>emotion is expressed somewhat differently by each</td></tr><tr><td>culture. Thus, there is a need to build and evaluate</td></tr><tr><td>a Tamil emotional speech corpus to observe the rep-</td></tr><tr><td>resentation of different emotions in Tamil speech.</td></tr><tr><td>Some of the available Tamil speech datasets include</td></tr><tr><td>mozilla common voice</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |