|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:34:07.371065Z" |
|
}, |
|
"title": "Gender Detection from Human Voice Using Tensor Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Prasanta", |
|
"middle": [], |
|
"last": "Roy", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology Guwahati Guwahati", |
|
"location": { |
|
"settlement": "Assam", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Parabattina", |
|
"middle": [], |
|
"last": "Bhagath", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology Guwahati Guwahati", |
|
"location": { |
|
"settlement": "Assam", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Pradip", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Das", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology Guwahati Guwahati", |
|
"location": { |
|
"settlement": "Assam", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Speech-based communication is one of the most preferred modes of communication for humans. The human voice contains several important information and clues that help in interpreting the voice message. The gender of the speaker can be accurately guessed by a person based on the received voice of a speaker. The knowledge of the speaker's gender can be a great aid to design accurate speech recognition systems. GMM based classifier is a popular choice used for gender detection. In this paper, we propose a Tensor-based approach for detecting the gender of a speaker and discuss its implementation details for low resourceful languages. Experiments were conducted using the TIMIT and SHRUTI dataset. An average gender detection accuracy of 91% is recorded. Analysis of the results with the proposed method is presented in this paper.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Speech-based communication is one of the most preferred modes of communication for humans. The human voice contains several important information and clues that help in interpreting the voice message. The gender of the speaker can be accurately guessed by a person based on the received voice of a speaker. The knowledge of the speaker's gender can be a great aid to design accurate speech recognition systems. GMM based classifier is a popular choice used for gender detection. In this paper, we propose a Tensor-based approach for detecting the gender of a speaker and discuss its implementation details for low resourceful languages. Experiments were conducted using the TIMIT and SHRUTI dataset. An average gender detection accuracy of 91% is recorded. Analysis of the results with the proposed method is presented in this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Gender detection is one of the important problems in speaker and speech recognition domains. It has got significance because of the gain in popularity of voice-based systems like Alexa, Google Assistant, Cortana, Siri, etc. One of the applications of this is helping companies to provide better solutions. In speech recognition, it helps in improving the accuracy of recognition. It also has importance in sub-problems like age detection, emotion detection, speaker identification, etc. Research on the gender detection problem started in the early '90s. The problem was studied by using features like Linear Predictive Cepstral Coefficients (LPCCs), energy, Mel Frequency Cepstral Coefficients (MFCCs), etc. Konig and Morgan (Konig and Morgan, 1992) used LPCCs in their work to address this problem. In the system that was proposed, a multi-layer perceptron was employed for the classification of gender. As a result, this system achieved an accuracy of 84% on DARPA resource management database. Neti (Neti and Roukos, 1997) proposed a GMM (Gaussian Mixture Model) based gender classification approach for an Air Travel Information System (ATIS) corpus. It was reported that 95% accuracy was obtained. This was an improvement over a simple pattern matching approach. MFCCs have widely accepted features in speaker characterization. They play an important role in GMM based systems that deal with gender recognition task. Tzanetakis (Tzanetakis and Cook, 2002) proposed a system that uses the above-mentioned features. The system was developed with gender classification and sports announcement facilities. Along with the techniques that are discussed, there are papers available on the same problem. In these systems, the pitch was used as a crucial feature. Several studies agree that modeling techniques like Convolutional Neural Networks (CNNs) (Doukhan et al., 2018) , Expectation-Maximization (EM) (Y\u00fccesoy and Nabiyev, 2013) , Hidden Markov Models (HMMs) (Parris and Carey, 1996) , Support Vector Machine (SVM) classifiers (Jo et al., 2008) are successful in this area of research. GMM-based classifiers and Expectation-Maximization (EM) have been used predominantly for modeling and parameter estimation, respectively. Most of the methods for estimating parameters of GMM are based on Maximum Likelihood Estimation (MLE), which has a drawback of getting stuck in a local optimum. So it needs to restart indefinitely to search for global optimum, and sometimes it may not find global optimum at all. As a result, the whole process of parameter estimation becomes very timeconsuming. In this paper, we have proposed an eigenvector-based approach to detect the gender from human voice using tensor analysis. We have used MFCCs as feature vector to form the feature vector space. Method of moments is used to build the tensor structure from the feature vector space for each gender. The tensor power method is applied to compute the eigenvectors from that tensor structure (Anandkumar et al., 2014) . The proposed approach does not require multiple restarts but still provides 91% accuracy using Euclidean distance for evaluations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 709, |
|
"end": 750, |
|
"text": "Konig and Morgan (Konig and Morgan, 1992)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1003, |
|
"end": 1026, |
|
"text": "(Neti and Roukos, 1997)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1434, |
|
"end": 1461, |
|
"text": "(Tzanetakis and Cook, 2002)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1850, |
|
"end": 1872, |
|
"text": "(Doukhan et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1905, |
|
"end": 1932, |
|
"text": "(Y\u00fccesoy and Nabiyev, 2013)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1963, |
|
"end": 1987, |
|
"text": "(Parris and Carey, 1996)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 2031, |
|
"end": 2048, |
|
"text": "(Jo et al., 2008)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 2978, |
|
"end": 3003, |
|
"text": "(Anandkumar et al., 2014)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "In this section, we will go through the basics of Tensors and related multi-linear algebra that are essential concepts to understand the tensor power method (Anandkumar et al., 2017) and its usefulness in parameter estimation of latent variable models. A comprehensive study about tensor is available in the work of Kolda (Kolda and Bader, 2009) and Sidiropoulos (Sidiropoulos et al., 2017) , whereas a multilinear map and its notations can be found in the work of Lim (Lek-Heng Lim, 2005) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 182, |
|
"text": "(Anandkumar et al., 2017)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 345, |
|
"text": "(Kolda and Bader, 2009)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 350, |
|
"end": 390, |
|
"text": "Sidiropoulos (Sidiropoulos et al., 2017)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 465, |
|
"end": 489, |
|
"text": "Lim (Lek-Heng Lim, 2005)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Basic Understanding of Tensors", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Tensor is a multiway collection of numbers or an extension of a matrix in higher order. Vectors and Matrices are firstorder and second-order tensors, respectively. In general, a p th order tensor is an object that can be interpreted as a p-dimensional array of numbers. Tensor order is the number of dimensions of the tensor. Though the tensor can be of any order, we will describe tensor as a 3 rd order tensor structure in our experiments. For discussion, an N-way ten-sor is the same as N-order tensor or vice versa. In terms of notation, a scalar is denoted by lower case letters a \u2208 R, vectors by bold lower case letter a \u2208 R I1 , matrices by upper case bold letter A \u2208 R I1\u00d7I2 and for higher order tensor calligraphic letters are used A \u2208 R I1\u00d7I2\u00d7...\u00d7I N . Figure 1 : Zero th Order Tensor (a \u2208 R, First Order Tensor (a \u2208 R 4 ), Second Order Tensor (A \u2208 R 4\u00d73 ), Third Order Tensor (A \u2208 R 4\u00d73\u00d75 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 763, |
|
"end": 771, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tensor Preliminaries", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": "Vector outer product is the element-wise product of two vectors. The outer product of two vectors produces a Matrix, which is a second-order tensor. In this discussion, the outer product will be denoted by symbol. For instance, if a and b are two n-sized vectors then their outer product will produce a matrix A as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Outer Product and Inner Product", |
|
"sec_num": "2.1.1." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A = a b = ab T", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Outer Product and Inner Product", |
|
"sec_num": "2.1.1." |
|
}, |
|
{ |
|
"text": "Similarly, the outer product of three vectors will generate 3 rd order tensor, which will be relevant to our topic of discussion. In general, the outer product of n vectors creates n-order tensor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Outer Product and Inner Product", |
|
"sec_num": "2.1.1." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A = a (1) a (2) a (3) ..... a (n)", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Outer Product and Inner Product", |
|
"sec_num": "2.1.1." |
|
}, |
|
{ |
|
"text": "In contrast to this, the inner product of two m-sized vectors will generate a scalar.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Outer Product and Inner Product", |
|
"sec_num": "2.1.1." |
|
}, |
|
{ |
|
"text": "a = a T b = m i=1 a i b i (3) 2.1.2. Tensor Rank", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Outer Product and Inner Product", |
|
"sec_num": "2.1.1." |
|
}, |
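{

"text": "As an illustrative sketch (ours, not part of the original paper), the outer products of Equations (1)-(2) and the inner product of Equation (3) can be checked numerically with NumPy; the vectors a, b, and c below are hypothetical examples:\n\nimport numpy as np\n\na = np.array([1.0, 2.0, 3.0])\nb = np.array([4.0, 5.0, 6.0])\nc = np.array([7.0, 8.0])\n\nA = np.outer(a, b)                    # Eq. (1): second-order tensor, A[i, j] = a[i] * b[j]\nT = np.einsum('i,j,k->ijk', a, b, c)  # Eq. (2) for n = 3: third-order tensor\ns = np.inner(a, b)                    # Eq. (3): scalar, sum_i a[i] * b[i]\nprint(A.shape, T.shape, s)            # (3, 3) (3, 3, 2) 32.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Outer Product and Inner Product",

"sec_num": null

},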
|
{ |
|
"text": "Tensor rank is one of the important properties of a tensor. Before going to tensor rank, we will discuss about Rank-1 tensor. If an N-order tensor is strictly decomposed as an outer product of N vectors, then the N-order tensor is a Rank-1 tensor. So a Rank-1 matrix (2-way tensor) can be written as A = a b. Similarly a Rank-1-third-order tensor can be represented as A = a b c. Minimum number of rank-1 N order tensors required that can sum up as N order tensor is called the rank of the Norder tensor. A rank-R third-order tensor can be repre-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Outer Product and Inner Product", |
|
"sec_num": "2.1.1." |
|
}, |
|
{ |
|
"text": "sented as A = R i=1 \u03bb i a i b i c i .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Outer Product and Inner Product", |
|
"sec_num": "2.1.1." |
|
}, |
|
{ |
|
"text": "Here the \u03bb is used to represent the weighting factor during normalization of matrices, which are the other factors of the resultant tensor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Outer Product and Inner Product", |
|
"sec_num": "2.1.1." |
|
}, |
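{

"text": "A short numerical sketch (our illustration, not from the paper) of a rank-R third-order tensor built as a weighted sum of rank-1 terms, assuming NumPy and randomly chosen factor vectors:\n\nimport numpy as np\n\nrng = np.random.default_rng(0)\nd, R = 4, 2\nlam = np.array([2.0, 0.5])   # weighting factors lambda_i\nA_mat, B_mat, C_mat = (rng.standard_normal((d, R)) for _ in range(3))\n\n# A = sum_i lambda_i * a_i (outer) b_i (outer) c_i\nT = np.einsum('r,ir,jr,kr->ijk', lam, A_mat, B_mat, C_mat)\nprint(T.shape)               # (4, 4, 4): a rank-2 third-order tensor",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Tensor Rank",

"sec_num": null

},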
|
{ |
|
"text": "In Mathematics, it is fundamental to decompose an object into some simpler and easy-to-handle objects. Matrix decomposition techniques are significant in the field of Mathematics in their application to solve linear equation systems and the implementation of numerical algorithms efficiently.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Decomposition", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "In the following part, we have discussed the nonuniqueness of general matrix decomposition and the uniqueness of tensor decomposition with much-relaxed conditions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Decomposition", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "In our discussion on matrix decomposition, we focus on matrix rank decomposition, which is an information extraction technique. It can be expressed by the following equation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Matrix Decomposition and Rotational Problem", |
|
"sec_num": "2.2.1." |
|
}, |
|
{ |
|
"text": "A = BC T (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Matrix Decomposition and Rotational Problem", |
|
"sec_num": "2.2.1." |
|
}, |
|
{ |
|
"text": "where A \u2208 R n\u00d7m , B \u2208 R n\u00d7r , C \u2208 R m\u00d7r and r is rank of the decomposition. Similar work was carried out by Charles Spearman, a British Psychologist in 1904, which is popularly known as Spearman's Hypothesis. However Equation 4 is not unique. By using another invertible matrix R, we can create another decomposition. Absorbing R on the left with B and R \u22121 on the right of C we can generate matrix\u1e02 and\u010a respectively which can be used to reconstruct A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Matrix Decomposition and Rotational Problem", |
|
"sec_num": "2.2.1." |
|
}, |
|
{ |
|
"text": "A = BC T = BRR \u22121 C T = (BR)(R \u22121 C T ) =\u1e02\u010a (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Matrix Decomposition and Rotational Problem", |
|
"sec_num": "2.2.1." |
|
}, |
|
{ |
|
"text": "We can see that matrix rank-decomposition is non-unique generally. Though some decomposition techniques provide unique decomposition over some conditions such as orthogonality for Singular Value Decomposition (SVD), tensor decomposition is unique under much milder conditions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Matrix Decomposition and Rotational Problem", |
|
"sec_num": "2.2.1." |
|
}, |
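{

"text": "The rotational ambiguity of Equation (5) is easy to reproduce numerically; the following sketch (our illustration, with hypothetical matrices) shows two different factor pairs reconstructing the same matrix:\n\nimport numpy as np\n\nrng = np.random.default_rng(1)\nB = rng.standard_normal((5, 2))\nC = rng.standard_normal((4, 2))\nA = B @ C.T                      # Eq. (4): rank-2 decomposition of A\n\nR = rng.standard_normal((2, 2))  # any invertible matrix\nB2 = B @ R                       # R absorbed on the left\nC2T = np.linalg.inv(R) @ C.T     # R^{-1} absorbed on the right\nprint(np.allclose(A, B2 @ C2T))  # True: same A, different factors",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Matrix Decomposition and Rotational Problem",

"sec_num": null

},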
|
{ |
|
"text": "Tensor decomposition is unique only if there is one type of rank-1 tensor that sums up to our main tensor with a certain scaling factor. It means we cannot construct a different arrangement of rank-1 tensors that can sum up to our desired main tensor. The uniqueness of tensor decomposition is under much milder conditions than matrix decomposition. Let's consider a slice of a tensor A which can be represented as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Uniqueness and Rigidness", |
|
"sec_num": "2.2.2." |
|
}, |
|
{ |
|
"text": "A k = R i=1 (a i b i )c ki (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Uniqueness and Rigidness", |
|
"sec_num": "2.2.2." |
|
}, |
|
{ |
|
"text": "Here k represents the k th slice which is also a low-rank matrix. Therefore a tensor is not just a low-rank collection of these slices, there is an interrelation among them. If we observe, each slice is a differently scaled representation of the same matrix. This constraint helps us to address the rotational problem of a matrix that is faced during matrix decomposition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Uniqueness and Rigidness", |
|
"sec_num": "2.2.2." |
|
}, |
|
{ |
|
"text": "To determine the factors that capture the underlying structure of a tensor, we subtract the scaled matrix formed by those factors. For matrices, there are multiple possibilities of finding those factors. But for tensors, these factors have to satisfy all the slices, thus making a strong interconnection between the slices, which further makes the tensor more rigid.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Uniqueness and Rigidness", |
|
"sec_num": "2.2.2." |
|
}, |
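{

"text": "A small sketch (ours, not the paper's) of Equation (6): every slice of a rank-R tensor is a combination of the same rank-1 matrices, scaled by the entries of the rows of C:\n\nimport numpy as np\n\nrng = np.random.default_rng(2)\nI, J, K, R = 4, 3, 5, 2\nA_mat, B_mat, C_mat = (rng.standard_normal((n, R)) for n in (I, J, K))\nT = np.einsum('ir,jr,kr->ijk', A_mat, B_mat, C_mat)\n\nk = 1  # pick a slice\nslice_k = sum(C_mat[k, r] * np.outer(A_mat[:, r], B_mat[:, r]) for r in range(R))\nprint(np.allclose(T[:, :, k], slice_k))  # True: Eq. (6) holds for every k",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Tensor Uniqueness and Rigidness",

"sec_num": null

},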
|
{ |
|
"text": "Tensor Decomposition is one of the most studied topics of tensors. There are two different families of tensor decomposition techniques as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Decomposition Algorithms", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "1. Canonical Polyadic Decomposition (CPD) 2. Tucker Decomposition", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Decomposition Algorithms", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "CPD is mainly used for latent parameter estimation, and Tucker is used for compression, dimensionality reduction, estimation of subspace, etc. In the following subsections, first, we have discussed the basic understanding of CPD and Tucker decomposition, followed by the tensor power method, which is a special kind of CPD decomposition. The tensor power method is used in our proposed approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Decomposition Algorithms", |
|
"sec_num": "2.3." |
|
}, |
|
{ |
|
"text": "A rank decomposition is a way to express a tensor as a sum of rank-1 tensors of finite numbers. Rank decomposition has been discovered differently in different knowledge domains in many forms. Parallel Factors (PARAFAC) and Canonical Decomposition (CANDECOMP) is the most popular among them. The basic principle is the same for them. We will refer to this as CANDECOMP/PARAFAC or Canonical polyadic decomposition. CPD for a 3-way Tensor(A) can be expressed as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Canonical Polyadic Decomposition", |
|
"sec_num": "2.3.1." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "min A A \u2212\u00c2 where\u00c2 = R i=1 a i b i c i", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Canonical Polyadic Decomposition", |
|
"sec_num": "2.3.1." |
|
}, |
|
{ |
|
"text": "Different algorithms are available to compute the CPD of any given tensor. Jennrich's and Alternating Least Square Algorithm (ALS) are the most popular among them. Let A, B and C be factor matrices that holds the combination of vectors (a i , b i , c i ) forming the rank-1 tensor A as columns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Canonical Polyadic Decomposition", |
|
"sec_num": "2.3.1." |
|
}, |
|
{ |
|
"text": "A = [a 1 a 2 ...a R ] B = [b 1 b 2 ...b R ] C = [c 1 c 2 ...c R ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Canonical Polyadic Decomposition", |
|
"sec_num": "2.3.1." |
|
}, |
|
{ |
|
"text": "Jennrich's algorithm states that if A, B, and C are linearly independent, then the matrix have full rank. We can use this algorithm to compute the factor matrices as the tensor", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Canonical Polyadic Decomposition", |
|
"sec_num": "2.3.1." |
|
}, |
|
{ |
|
"text": "A = R i=1 \u03bb i a i b i c i .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Canonical Polyadic Decomposition", |
|
"sec_num": "2.3.1." |
|
}, |
|
{ |
|
"text": "It is unique up to a trivial permutation of rank and scaling factors. This algorithm works for some problem, but it does not consider all the tensor slices, and it also requires a good difference between two successive eigen values (eigen-gap), absence of which causes numerical instability. ALS is state of the art for modern tensor decomposition techniques in the CPD family. The key idea is to fix all factor matrices for the tensor except one and then estimating the non-fixed matrix. This step is repeated for all the factor matrices until a specific stopping criterion is achieved. Though the ALS algorithm is straightforward, it takes several steps to converge, and sometimes it may also get stuck at a local optimum.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Canonical Polyadic Decomposition", |
|
"sec_num": "2.3.1." |
|
}, |
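{

"text": "As a rough sketch of the ALS idea described above (our simplified NumPy implementation, not the paper's code), each factor matrix is re-estimated by least squares while the other two are held fixed; unfold() and khatri_rao() are helper names we introduce here:\n\nimport numpy as np\n\ndef unfold(T, mode):\n    # move the chosen mode to the front and flatten the remaining modes\n    return np.moveaxis(T, mode, 0).reshape(T.shape[mode], -1)\n\ndef khatri_rao(U, V):\n    # column-wise Kronecker product; row order matches unfold() above\n    return np.einsum('ir,jr->ijr', U, V).reshape(-1, U.shape[1])\n\ndef als_cpd(T, R, iters=100, seed=0):\n    rng = np.random.default_rng(seed)\n    A, B, C = (rng.standard_normal((n, R)) for n in T.shape)\n    for _ in range(iters):\n        A = np.linalg.lstsq(khatri_rao(B, C), unfold(T, 0).T, rcond=None)[0].T\n        B = np.linalg.lstsq(khatri_rao(A, C), unfold(T, 1).T, rcond=None)[0].T\n        C = np.linalg.lstsq(khatri_rao(A, B), unfold(T, 2).T, rcond=None)[0].T\n    return A, B, C",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Canonical Polyadic Decomposition",

"sec_num": null

},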
|
{ |
|
"text": "In this type of decomposition, a tensor is decomposed in a core tensor and factor matrices. Algorithms like Higher-Order Singular Value Decomposition (HOSVD), Higher-Order Orthogonal Iteration (HOOI) comes under this family of decomposition. However, in contrast to CPD, Tucker decomposition is not unique, and so it is not used for the estimation of latent variables.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tucker Decomposition", |
|
"sec_num": "2.3.2." |
|
}, |
|
{ |
|
"text": "This method is a special type that comes under the CPD family. The tensors that can be decomposed by this algorithm should have the following structure:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Power Method", |
|
"sec_num": "2.3.3." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A = R i=1 \u03bb i a i a i a i", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Tensor Power Method", |
|
"sec_num": "2.3.3." |
|
}, |
|
{ |
|
"text": "In this special case, the factor matrices have to be identical, and a i 's need to be orthogonal to construct vectors from rank-1 tensors. It is very similar to the matrix power method, but this algorithm tries to calculate top singular vectors in a tensor. The main idea behind the matrix power method is to estimate the eigenvector a i,k+1 to a i as well as the eigenvalue \u03bb i based on the following recurrence relation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Power Method", |
|
"sec_num": "2.3.3." |
|
}, |
|
{ |
|
"text": "a i,k+1 = A i (I, a i,k ) A i (I, a i,k ) 2 = A i a i,k A i a i,k 2 (9)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Power Method", |
|
"sec_num": "2.3.3." |
|
}, |
|
{ |
|
"text": "where a i,0 will be chosen randomly, or it can be initialized with some correlation to the true eigenvector if possible. This approximation follows the eigenvector/-value relationship Aa i = A(I, a i ) = \u03bb i a i . The top singular value can be computed from the computed eigenvector after convergence. As we have to calculate the first few dominant eigenvalues, this can be computed by the same process after deflating the matrix by the following formulae:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Power Method", |
|
"sec_num": "2.3.3." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A i+1 = A i \u2212 \u03bb i a i a i", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Tensor Power Method", |
|
"sec_num": "2.3.3." |
|
}, |
|
{ |
|
"text": "To use this matrix power method in the Tensor approach, we have to incorporate the following changes in Equation (9).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Power Method", |
|
"sec_num": "2.3.3." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "a i,k+1 = A i (I, a i,k , a i,k ) A i (I, a i,k , a i,k )", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Tensor Power Method", |
|
"sec_num": "2.3.3." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A i+1 = A i \u2212 \u03bb i a i a i a i", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Tensor Power Method", |
|
"sec_num": "2.3.3." |
|
}, |
|
{ |
|
"text": "This tensor Power method was used in the proposed method because of its efficiency in calculating the tensor. In the next section, the approach is explained in detail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Power Method", |
|
"sec_num": "2.3.3." |
|
}, |
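{

"text": "A minimal sketch of the tensor power iteration with deflation (Equations (11) and (12)); this is our own illustrative NumPy code under the orthogonality assumption stated above, not the authors' implementation:\n\nimport numpy as np\n\ndef tensor_power_method(T, k, iters=100, seed=0):\n    # T: symmetric third-order tensor of shape (d, d, d)\n    d = T.shape[0]\n    rng = np.random.default_rng(seed)\n    T = T.copy()\n    eigvals, eigvecs = [], []\n    for _ in range(k):\n        v = rng.standard_normal(d)\n        v /= np.linalg.norm(v)\n        for _ in range(iters):\n            v = np.einsum('ijk,j,k->i', T, v, v)    # Eq. (11): T(I, v, v)\n            v /= np.linalg.norm(v)\n        lam = np.einsum('ijk,i,j,k->', T, v, v, v)  # eigenvalue estimate\n        T -= lam * np.einsum('i,j,k->ijk', v, v, v) # Eq. (12): deflation\n        eigvals.append(lam)\n        eigvecs.append(v)\n    return np.array(eigvals), np.stack(eigvecs, axis=1)  # eigenvectors as columns",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Tensor Power Method",

"sec_num": null

},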
|
{ |
|
"text": "An uttered sound of a speaker is a collection of feature vectors. Each feature vector is a scaled sum of eigenvectors of that feature vector space. Some of these eigenvectors can be factors that represent age, gender, or other properties about the speakers while some form the content of the speech. If we collect feature vectors of male speaker utterances and construct a feature vector space from those, then that feature vector space gets dominated by the eigenvectors, which are the factors of masculinity. The same goes for females. For any unknown utterances of the speaker, if we find the presence of these eigenvectors, we can infer the gender of the speaker. The following part consists of feature vector space generation of each gender, computation of dominant eigenvectors using the tensor power method, and finding the presence of these eigenvectors in an unknown utterance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "We have used MFCCs as feature vectors to generate vectorspace for each gender as MFCC is based on the principle of the human's auditory system. Twenty-six MFCCs are collected from each frame of an utterance. Thus each feature vector is of twenty six dimensions (x \u2208 R 26 ). We have a collection of utterances for male and female speakers. We have computed feature vectors from each of the collections and obtained a set of feature vectors for each gender. This set of feature vectors works as a feature space that is used to compute dominant eigenvectors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Vector Space Generation", |
|
"sec_num": "3.1." |
|
}, |
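{

"text": "One possible way to obtain the 26-dimensional MFCC vectors described above is sketched below; the paper does not state which extractor was used, so the third-party librosa library and the file name 'utterance.wav' are our assumptions:\n\nimport librosa\n\n# load one utterance and extract 26 MFCCs per frame\nsignal, sr = librosa.load('utterance.wav', sr=16000)\nmfcc = librosa.feature.mfcc(y=signal, sr=sr, n_mfcc=26)  # shape: (26, n_frames)\nfeatures = mfcc.T                                        # one 26-dim vector per frame",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Feature Vector Space Generation",

"sec_num": null

},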
|
{ |
|
"text": "Before applying the tensor power method to compute the dominant eigenvectors, we have to form tensor from the feature vectors of each feature-space. A 3 rd order tensor is constructed from each set of feature vectors. Method of moments is used to construct the 3 rd order tensor. The first raw moment is the mean, which can be computed by the following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "m 1 = \u00b5 = E[x] = 1 N N i=1 x i (13)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "where N is the number of feature vectors in each gender set. Second ordinal moment can be computed by the following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "M 2 = E[x x] \u2212 \u03c3 2 I (14)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "where \u03c3 2 is the smallest eigenvalue of the covarience matrix ( \u03a3 = E[x x] \u2212 m 1 m 1 ) and I is the Identity matrix (I \u2208 R d\u00d7d ). Similarly the third ordinal moment can be computed as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "M 3 = E[x x x] \u2212 \u03c3 2 d i=1 (m 1 e i e i +e i m 1 e i + e i e i m 1 )", |
|
"eq_num": "(15)" |
|
} |
|
], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "where e i is the basis vector in i th dimension. From the work of Hsu and Kakade (Hsu and Kakade, 2013) these moments can be reduced to the following forms:", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 103, |
|
"text": "(Hsu and Kakade, 2013)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "M 2 = p i=1 w i a i a i (16) M 3 = p i=1 w i a i a i a i", |
|
"eq_num": "(17)" |
|
} |
|
], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "Thus M 3 is the scaled sum of p eigenvectors (a i ). We need to find the k dominant eigenvectors that are responsible for the gender property of the speaker. M 2 could have been used to compute a i s, but due to matrix rotational problem, it can not be computed accurately. Whereas in tensor (3 rd order or higher), these can be computed more easily. These Eigenvectors (a i ) can be computed by the tensor power method only if they are orthogonal in nature. For that, we have to orthogonalize M 3 . This has been done using M 2 . It is assumed that if a Matrix is found that can orthogonalize M 2 can help to orthogonalize M 3 . This orthogonalization of M 2 can be represented as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "M 2 (W, W ) = W T M 2 W = I (18)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "where W is the orthogonalizing matrix, It is also known as the whitening matrix. W can be calculated with the help of eigenvalue decomposition of second-order moment M 2 :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "M 2 = U DU T", |
|
"eq_num": "(19)" |
|
} |
|
], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "Singular value decomposition has been used to find U , D from Equation (19). W is computed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "W = U D \u2020 1 2", |
|
"eq_num": "(20)" |
|
} |
|
], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "where U \u2208 R d\u00d7k is a matrix of orthonormal eigenvectors, D \u2208 R k\u00d7k is a diagonal matrix of the eigenvalues of M 2 and A \u2020 is the Moore-Penrose pseudoinverse of matrix A. By using the following formulae W transforms M 3 into whitened space.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "M 3 = M 3 (W, W, W ) 1 = k i=1 \u03bb i v i v i v i (21)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "where v i and \u03bb i are converted eigenvectors and scaling factors respectively after orthogonalization of M 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tensor Formation", |
|
"sec_num": "3.2." |
|
}, |
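{

"text": "The moment and whitening computations of Equations (13)-(21) can be sketched as follows; this is our illustrative NumPy code under the stated assumptions (X holds one feature vector per row), not the authors' implementation:\n\nimport numpy as np\n\ndef moments(X):\n    # X: (N, d) matrix of feature vectors\n    N, d = X.shape\n    m1 = X.mean(axis=0)                                  # Eq. (13)\n    Sigma = (X.T @ X) / N - np.outer(m1, m1)             # covariance matrix\n    sigma2 = np.linalg.eigvalsh(Sigma)[0]                # smallest eigenvalue\n    M2 = (X.T @ X) / N - sigma2 * np.eye(d)              # Eq. (14)\n    M3 = np.einsum('ni,nj,nk->ijk', X, X, X) / N\n    E = np.eye(d)\n    for i in range(d):                                   # Eq. (15) correction terms\n        M3 -= sigma2 * (np.einsum('i,j,k->ijk', m1, E[i], E[i])\n                        + np.einsum('i,j,k->ijk', E[i], m1, E[i])\n                        + np.einsum('i,j,k->ijk', E[i], E[i], m1))\n    return m1, M2, M3\n\ndef whiten(M2, M3, k):\n    U, D, _ = np.linalg.svd(M2)                          # Eq. (19)\n    W = U[:, :k] / np.sqrt(D[:k])                        # Eq. (20)\n    M3w = np.einsum('abc,ai,bj,ck->ijk', M3, W, W, W)    # Eq. (21)\n    return W, M3w",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Tensor Formation",

"sec_num": null

},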
|
{ |
|
"text": "Now on M 3 we have applied tensor power method to identify dominant eigenvectors (v i ). We shall use Equation 11and Equation 12to compute the v i s and deflate the tensor, respectively. This process will be repeated until k dominant eigenvectors are obtained. As v i s are computed from orthogonalized tensor ( M 3 ), so by applying the inversion of the orthogonalization process we transform v i s to a i s of M 3 . We shall use the following formulae to do so:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eigenvectors Computation", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A = (W T ) \u2020 V Diag(\u03bb)", |
|
"eq_num": "(22)" |
|
} |
|
], |
|
"section": "Eigenvectors Computation", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "where A is the set of k number of a i s, V is the set of k number of v i and \u03bb i are k eigenvalues computed from the tensor power method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eigenvectors Computation", |
|
"sec_num": "3.3." |
|
}, |
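{

"text": "A one-line NumPy sketch of Equation (22) (ours; V holds the whitened eigenvectors as columns and lam the eigenvalues returned by the power method):\n\nimport numpy as np\n\ndef unwhiten(W, V, lam):\n    # Eq. (22): map whitened eigenvectors v_i back to the a_i of M_3\n    return np.linalg.pinv(W.T) @ V @ np.diag(lam)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Eigenvectors Computation",

"sec_num": null

},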
|
{ |
|
"text": "1 A k th order tensor is denoted by A = aj 1 ...j k \u2208 R d 1 \u00d7...\u00d7d k . Then covariant multi-linear matrix multiplication of A by M1 = [m (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eigenvectors Computation", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "j 1 i 1 ] \u2208 R d 1 \u00d7p 1 , . . . , M k = [m (k) j 1 i 1 ] \u2208 R d k \u00d7p k can be defined as: A(M1, . . . , M k ) = d 1 j 1 =1 . . . d k j k =1 aj 1 ...j k m (1) j 1 i 1 . . . m (k) j k i k \u2208 R p 1 \u00d7...\u00d7p k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eigenvectors Computation", |
|
"sec_num": "3.3." |
|
}, |
|
{ |
|
"text": "We have obtained k dominant eigenvectors from each of the feature vector set of male and female speakers. A m and A f are the eigenvectors set of male and female speaker, respectively. For any unknown the feature vector in the feature space, we will calculate distance from the dominating eigenvector (minimum distance). The distance for i th feature vector (x i ) is calculated by using the following formula:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Creation and Evaluation", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "D i = min k ( d j=1 (a kj \u2212 x ij ) 2 )", |
|
"eq_num": "(23)" |
|
} |
|
], |
|
"section": "Model Creation and Evaluation", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "Total distance from A f and A m can be computed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Creation and Evaluation", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "D m = N i=1 D i (24) D f = N i=1 D i (25)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Creation and Evaluation", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "where N is the total number of feature vectors (Number of frames) for a voice sample.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Creation and Evaluation", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "Features vectors collected from male voice will be containing vectors which are affected by male eigenvectors, whereas it will be less affected by the female eigenvectors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Creation and Evaluation", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "Thus D m will be less than D f . For similar reasons, D f will be less than D m for the female voice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Creation and Evaluation", |
|
"sec_num": "3.4." |
|
}, |
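{

"text": "The decision rule of Equations (23)-(25) can be sketched as follows (our illustration; X is the (N, d) feature matrix of an unknown utterance and A_m, A_f are (d, k) matrices of dominant eigenvectors):\n\nimport numpy as np\n\ndef total_distance(X, A):\n    # squared Euclidean distance from each x_i to every eigenvector a_k\n    d2 = ((X[:, None, :] - A.T[None, :, :]) ** 2).sum(axis=2)\n    return d2.min(axis=1).sum()   # Eq. (23) per frame, then Eq. (24)/(25)\n\ndef detect_gender(X, A_m, A_f):\n    D_m = total_distance(X, A_m)\n    D_f = total_distance(X, A_f)\n    return 'male' if D_m < D_f else 'female'",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Creation and Evaluation",

"sec_num": null

},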
|
{ |
|
"text": "Experiments were conducted on two different datasets (TIMIT (S Garofolo et al., 1992) and SHRUTI (Das et al., 2011) ). The study can be divided into three different cases, as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 85, |
|
"text": "(S Garofolo et al., 1992)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 97, |
|
"end": 115, |
|
"text": "(Das et al., 2011)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "1. TIMIT DR1 2. TIMIT Mix 3. SHRUTI dataset", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "The first dataset is a subset of the TIMIT dataset, which consists of only the New England dialect. TIMIT Mix dataset is the subset that contains eight different dialect regions. The third dataset is a collection of spoken sentences belonging to the Bengali language. Bengali is the predominant language used in West Bengal, a state of the Indian subcontinent. In the present work, a subpart of this database was used. Table 4 . gives the complete description of the dataset used in the study. The results obtained using the approach are discussed in the next section. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 419, |
|
"end": 426, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "The results are presented for different cases, as follows: Next, the performance of the proposed approach with respect to different numbers of dominating eigenvectors was evaluated. In this experiment, the TIMIT Mix dataset was used. The results are shown in Table 3 . This experiment also shows that there is an increment in average gender detection accuracy, which denotes that the eigenvectors computed by the proposed approach are relevant to gender detection. Table 3 : Performance with respect to the number of dominant eigenvectors (k).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 259, |
|
"end": 266, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 465, |
|
"end": 472, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "We tested the performance of the proposed approach in different datasets: SHRUTI, TIMIT Mix, and TIMIT DR1. Figure 2 shows that the proposed method provides consistent performance across different datasets. To test whether Figure 2 : Performance of the proposed approach for different datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 116, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 231, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Number of Eigenvectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "the proposed approach is capturing the language-specific or voice-specific gender property, we computed eigenvectors using TIMIT Mix dataset and evaluated with other datasets. We have obtained a comparable accuracy in different datasets (Figure 3) , which demonstrates that the proposed approach captures the voice-specific gender property. Figure 3 : Performance of the proposed approach for different datasets trained using single dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 237, |
|
"end": 247, |
|
"text": "(Figure 3)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 349, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Number of Eigenvectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We evaluated its performance with respect to noisy utterances. Figure 4 shows the performance of the proposed approach with different Signal to Noise Ratio (SNR). The proposed method provides a consistent performance where the SNR is more than ten for input utterances.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 71, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Number of Eigenvectors", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Accuracy (%) GMM -EM Proposed approach 13 93.2 84.1 26 97.4 91.4 Table 4 : Performance comparison of GMM and the proposed approach.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 72, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Size of feature vector", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also compared the performance of our approach with the modern, state-of-the-art GMM-EM on the TIMIT Figure 4 : Performance of the proposed approach with respect to noisy data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 111, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Size of feature vector", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "dataset. We conducted this experiment on the feature vector of size thirteen and twenty-six. We have presented our results in Table 4 . Figure 5 provides a comparison between GMM-EM and the proposed method. Even though the detection efficiency of the proposed approach is comparatively less, but the proposed approach does not require multiple restarts like GMM-EM, and the improvement of results with the varying feature vectors is encouraging.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 133, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Size of feature vector", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, a simple yet effective tensor-based approach was proposed for gender detection from the human voice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "In the approach, we have computed dominant eigenvectors of the feature space of utterances using tensor analysis. It is demonstrated that the proposed method captures the relevant gender properties of the human voice and also provides consistent performance for high dimensional feature vectors. We have evaluated this approach on different datasets and proved that its performance is consistent with an accuracy of 91% in each case. We have also demonstrated its performance on noisy data and concluded that it provides reasonable accuracy for SNR higher than 10. The proposed approach provided comparable performance with respect to", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6." |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "which ensures that with further improvement, and it can offer better performance without the drawbacks of GMM-EM. This work shows that the eigenvector-based approach using tensor analysis provides consistent performance irrespective of the dataset", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gmm-Em", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "GMM-EM, which ensures that with further improvement, and it can offer better performance without the drawbacks of GMM-EM. This work shows that the eigenvector-based approach using tensor analysis provides consistent perfor- mance irrespective of the dataset.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Tensor decompositions for learning latent variable models", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Anandkumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hsu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Kakade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Telgarsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "J. Mach. Learn. Res", |
|
"volume": "15", |
|
"issue": "1", |
|
"pages": "2773--2832", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anandkumar, A., Ge, R., Hsu, D., Kakade, S. M., and Telgarsky, M. (2014). Tensor decompositions for learning latent variable models. J. Mach. Learn. Res., 15(1):2773-2832, January.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Analyzing tensor power method dynamics in overcomplete regime", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Anandkumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Janzamin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "J. Mach. Learn. Res", |
|
"volume": "18", |
|
"issue": "1", |
|
"pages": "752--791", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anandkumar, A., Ge, R., and Janzamin, M. (2017). An- alyzing tensor power method dynamics in overcomplete regime. J. Mach. Learn. Res., 18(1):752-791, January.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Bengali speech corpus for continuous auutomatic speech recognition system", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Mandal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitra", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "2011 International Conference on Speech Database and Assessments (Oriental COCOSDA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "51--55", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Das, B., Mandal, S., and Mitra, P. (2011). Bengali speech corpus for continuous auutomatic speech recognition system. In 2011 International Conference on Speech Database and Assessments (Oriental COCOSDA), pages 51-55, Oct.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "An open-source speaker gender detection framework for monitoring gender equality", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Doukhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Carrive", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Vallet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Larcher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Meignier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Doukhan, D., Carrive, J., Vallet, F., Larcher, A., and Meignier, S. (2018). An open-source speaker gender de- tection framework for monitoring gender equality. April.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Learning mixtures of spherical gaussians: Moment methods and spectral decompositions", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hsu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Kakade", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 4th Conference on Innovations in Theoretical Computer Science, ITCS '13", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hsu, D. and Kakade, S. M. (2013). Learning mixtures of spherical gaussians: Moment methods and spectral de- compositions. In Proceedings of the 4th Conference on Innovations in Theoretical Computer Science, ITCS '13, pages 11-20, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A support vector machine-based voice activity detection employing effective feature vectors", |
|
"authors": [ |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Jo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chang", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "IEICE Transactions", |
|
"volume": "", |
|
"issue": "6", |
|
"pages": "2090--2093", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jo, Q., Park, Y., Lee, K., and Chang, J. (2008). A sup- port vector machine-based voice activity detection em- ploying effective feature vectors. IEICE Transactions, 91-B(6):2090-2093.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Tensor decompositions and applications", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Kolda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Bader", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "SIAM Rev", |
|
"volume": "51", |
|
"issue": "3", |
|
"pages": "455--500", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kolda, T. G. and Bader, B. W. (2009). Tensor decomposi- tions and applications. SIAM Rev., 51(3):455-500, Au- gust.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Gdnn: a genderdependent neural network for continuous speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Konig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Morgan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Proceedings 1992] IJCNN International Joint Conference on Neural Networks", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "332--337", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Konig, Y. and Morgan, N. (1992). Gdnn: a gender- dependent neural network for continuous speech recog- nition. In [Proceedings 1992] IJCNN International Joint Conference on Neural Networks, volume 2, pages 332- 337 vol.2, June.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Phone-context specific gender-dependent acoustic-models for continuous speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Lek-Heng", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "" |
|
}, |
|
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Neti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "1st IEEE International Workshop on Computational Advances in Multi-Sensor Adaptive Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "192--198", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lek-Heng Lim. (2005). Singular values and eigenvalues of tensors: a variational approach. In 1st IEEE Interna- tional Workshop on Computational Advances in Multi- Sensor Adaptive Processing, 2005., pages 129-132, Dec. Neti, C. and Roukos, S. (1997). Phone-context spe- cific gender-dependent acoustic-models for continuous speech recognition. In 1997 IEEE Workshop on Auto- matic Speech Recognition and Understanding Proceed- ings, pages 192-198, Dec.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Language independent gender identification", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Parris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Carey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Garofolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Lamel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Fisher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Fiscus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Pallett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Dahlgren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Zue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing Conference Proceedings", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Parris, E. S. and Carey, M. J. (1996). Language indepen- dent gender identification. In 1996 IEEE International Conference on Acoustics, Speech, and Signal Processing Conference Proceedings, volume 2, pages 685-688 vol. 2, May. S Garofolo, J., Lamel, L., M Fisher, W., Fiscus, J., S. Pal- lett, D., L. Dahlgren, N., and Zue, V. (1992). Timit acoustic-phonetic continuous speech corpus. Linguistic Data Consortium, 11.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Tensor decomposition for signal processing and machine learning", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Sidiropoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "De Lathauwer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Papalexakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Faloutsos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "IEEE Transactions on Signal Processing", |
|
"volume": "65", |
|
"issue": "13", |
|
"pages": "3551--3582", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sidiropoulos, N. D., De Lathauwer, L., Fu, X., Huang, K., Papalexakis, E. E., and Faloutsos, C. (2017). Tensor de- composition for signal processing and machine learning. IEEE Transactions on Signal Processing, 65(13):3551- 3582, July.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Musical genre classification of audio signals", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Tzanetakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "IEEE Transactions on Speech and Audio Processing", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "293--302", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tzanetakis, G. and Cook, P. (2002). Musical genre classi- fication of audio signals. IEEE Transactions on Speech and Audio Processing, 10:293-302, Jan.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Gender identification of a speaker using mfcc and gmm", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Y\u00fccesoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Nabiyev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "626--629", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y\u00fccesoy, E. and Nabiyev, V. (2013). Gender identification of a speaker using mfcc and gmm. pages 626-629, Nov.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Performance comparison of GMM and the proposed tensor based approach.", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Description of datasets.", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Performance with respect to different sizes of feature vector (d).", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |