quickprop implementation changed

2016-05-09 22:46:41 +02:00
parent 1bdbb81bf6
commit 0f68dbf68d


@@ -5,6 +5,8 @@
 void NeuralNetwork::Learning::QuickPropagation::updateWeights(const std::vector<float> &input) {
+	float shrinkFactor=_maxChange/(_maxChange+1.0);
 	for(std::size_t layerIndex=1;layerIndex<network.size();layerIndex++) {
 		auto &layer=network[layerIndex];
 		auto &prevLayer=network[layerIndex-1];
@@ -14,13 +16,22 @@ void NeuralNetwork::Learning::QuickPropagation::updateWeights(const std::vector<
 		for(std::size_t j=1;j<layerSize;j++) {
-			float newChange=slopes[layerIndex][j]/(previousSlopes[layerIndex][j]-slopes[layerIndex][j]) * lastWeightChange[layerIndex][j];
-			newChange+=slopes[layerIndex][j]*_epsilon;
-			if(newChange > lastWeightChange[layerIndex][j]*_maxChange) {
-				newChange=lastWeightChange[layerIndex][j];
-			}
+			float newChange=0;
+			// according to original paper
+			if(fabs(lastWeightChange[layerIndex][j]) > 0.0001) {
+				if(std::signbit(lastWeightChange[layerIndex][j]) == std::signbit(slopes[layerIndex][j])) {
+					newChange+=slopes[layerIndex][j]*_epsilon;
+					if(fabs(slopes[layerIndex][j]) > fabs(shrinkFactor * previousSlopes[layerIndex][j])) {
+						newChange+=_maxChange * lastWeightChange[layerIndex][j];
+					} else {
+						newChange+=slopes[layerIndex][j]/(previousSlopes[layerIndex][j]-slopes[layerIndex][j]) * lastWeightChange[layerIndex][j];
+					}
+				} else {
+					newChange+=slopes[layerIndex][j]/(previousSlopes[layerIndex][j]-slopes[layerIndex][j]) * lastWeightChange[layerIndex][j];
+				}
+			} else {
+				newChange+=slopes[layerIndex][j]*_epsilon;
+			}
 			weightChange[layerIndex][j]=newChange;
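
The change replaces the old unconditional quadratic step with the full case analysis from Fahlman's quickprop paper (which the "according to original paper" comment refers to): when the weight moved on the last step and the slope keeps its sign, add a gradient-descent term slope*_epsilon; then either cap growth at _maxChange times the last change (when the slope has not shrunk below shrinkFactor = _maxChange/(_maxChange+1) of its previous value), or take the quadratic step slope/(previousSlope - slope) * lastChange toward the minimum of the parabola fitted through the last two slope measurements. Below is a minimal, self-contained sketch of that per-weight step; the free function quickpropStep and its parameter names are hypothetical stand-ins for the members _epsilon and _maxChange and the slopes/previousSlopes/lastWeightChange arrays.

#include <cmath>
#include <iostream>

// Hypothetical standalone version of the per-weight step from the diff above.
float quickpropStep(float slope, float previousSlope, float lastChange,
                    float epsilon, float maxChange) {
	const float shrinkFactor = maxChange / (maxChange + 1.0f);
	float change = 0.0f;
	if (std::fabs(lastChange) > 0.0001f) {
		if (std::signbit(lastChange) == std::signbit(slope)) {
			// Slope still points the same way: add a gradient-descent term.
			change += slope * epsilon;
			if (std::fabs(slope) > std::fabs(shrinkFactor * previousSlope)) {
				// Slope barely shrank: cap the step at maxChange times the last one.
				change += maxChange * lastChange;
			} else {
				// Quadratic step to the minimum of the fitted parabola.
				change += slope / (previousSlope - slope) * lastChange;
			}
		} else {
			// Sign flipped: take only the quadratic step, which lands
			// between the last two weight values.
			change += slope / (previousSlope - slope) * lastChange;
		}
	} else {
		// Weight barely moved last step: fall back to plain gradient descent.
		change += slope * epsilon;
	}
	return change;
}

int main() {
	// Toy values only, to show the call shape.
	std::cout << quickpropStep(0.2f, 0.5f, 0.1f, 0.55f, 1.75f) << "\n";
}

For reference, Fahlman's paper suggests 1.75 as a reasonable default for the growth limit (mu, here _maxChange), which is where shrinkFactor gets its value.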