@article{Aghajani2020,
  author    = {Aghajani, K. and Esmaili Paeen Afrakoti, I.},
  title     = {Speech Emotion Recognition Using Scalogram Based Deep Structure},
  journal   = {International Journal of Engineering},
  volume    = {33},
  number    = {2},
  pages     = {285--292},
  year      = {2020},
  publisher = {Materials and Energy Research Center},
  issn      = {1025-2495},
  eissn     = {1735-9244},
  doi       = {10.5829/ije.2020.33.02b.13},
  abstract  = {Speech Emotion Recognition (SER) is an important part of speech-based Human-Computer Interface (HCI) applications. Previous SER methods rely on extracting features and training an appropriate classifier. However, most of those features can be affected by emotionally irrelevant factors such as gender, speaking style, and environment. Here, an SER method has been proposed based on a concatenated Convolutional Neural Network (CNN) and a Recurrent Neural Network (RNN). The CNN can be used to learn local salient features from speech signals, images, and videos, while RNNs have been used in many sequential data processing tasks to learn long-term dependencies between the local features. Combining the two networks gives the advantages of both. In the proposed method, the CNN is applied directly to a scalogram of the speech signal; an attention-mechanism-based RNN model is then used to learn long-term temporal relationships among the learned features. Experiments on datasets such as RAVDESS, SAVEE, and Emo-DB demonstrate the effectiveness of the proposed SER method.},
  keywords  = {Continuous Wavelet Transform, Emotion Recognition, Convolutional Neural Network, Recurrent Network, Long Short-Term Memory},
  url       = {https://www.ije.ir/article_103377.html},
  eprint    = {https://www.ije.ir/article_103377_794721920c4ee4cdbf714566065b233e.pdf}
}