@inproceedings{0bea8956161645bbb73817c265e7aa05,
  title     = {Magnified gradient function to improve first-order gradient-based learning algorithms},
  abstract  = {In this paper, we propose a new approach to improve the performance of existing first-order gradient-based fast learning algorithms in terms of speed and global convergence capability. The idea is to magnify the gradient terms of the activation function so that fast learning speed and global convergence can be achieved. The approach can be applied to existing gradient-based algorithms. Simulation results show that this approach can significantly speed up the convergence rate and increase the global convergence capability of existing popular first-order gradient-based fast learning algorithms for multi-layer feed-forward neural networks.},
  keywords  = {backpropagation, gradient-based algorithms, magnified gradient function, Quickprop, Rprop},
  author    = {Ng, {Sin Chun} and Cheung, {Chi Chung} and Lui, {Andrew Kwok Fai} and Xu, Shensheng},
  year      = {2012},
  month     = jul,
  day       = {11},
  doi       = {10.1007/978-3-642-31346-2_51},
  language  = {English},
  isbn      = {9783642313455},
  series    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  number    = {PART 1},
  pages     = {448--457},
  booktitle = {Advances in Neural Networks, ISNN 2012 - 9th International Symposium on Neural Networks, Proceedings},
  note      = {9th International Symposium on Neural Networks, ISNN 2012 ; Conference date: 11-07-2012 Through 14-07-2012},
}