@inproceedings{4373861808364fad80cc35f4b6770c28,
  title         = {Applying Batch Normalization to Hybrid {NN-HMM} Model for Speech Recognition},
  abstract      = {Batch Normalization has showed success in image classification and other image processing areas by reducing internal covariate shift in deep network model{\textquoteright}s training procedure. In this paper, we propose to apply batch normalization to speech recognition within the hybrid NNHMM model. We evaluate the performance of this new method in the acoustic model of the hybrid system with a speaker-independent speech recognition task using some Chinese datasets. Compared to the former best model we used in the Chinese datasets, it shows that with batch normalization we can reach lower word error rate (WER) of 8\%--13\% relatively, meanwhile we just need 60\% iterations of original model to finish the training procedure.},
  author        = {Zhan, Hongjian and Chen, Guilin and Lu, Yue},
  note          = {Publisher Copyright: {\textcopyright} Springer Nature Singapore Pte Ltd. 2016.},
  year          = {2016},
  doi           = {10.1007/978-981-10-3005-5_35},
  language      = {English},
  isbn          = {9789811030048},
  series        = {Communications in Computer and Information Science},
  publisher     = {Springer Verlag},
  pages         = {427--435},
  editor        = {Tan, Tieniu and Chen, Xilin and Li, Xuelong and Yang, Jian and Cheng, Hong and Zhou, Jie},
  booktitle     = {Pattern Recognition - 7th Chinese Conference, CCPR 2016, Proceedings},
  address       = {Singapore},
  internal-note = {Review: language/address normalized from CJK values ("英语"/"德国"); address set to Singapore per the publisher copyright line (Springer Nature Singapore) -- confirm against the published volume. DOI de-escaped (bare form preferred). Names converted to unambiguous "Last, First" form; title stored in Title Case with the NN-HMM acronym brace-protected against style recasing.},
}