quickprop implementation changed

This commit is contained in:
2016-05-09 22:46:41 +02:00
parent 1bdbb81bf6
commit 0f68dbf68d

View File

@@ -5,6 +5,8 @@
void NeuralNetwork::Learning::QuickPropagation::updateWeights(const std::vector<float> &input) { void NeuralNetwork::Learning::QuickPropagation::updateWeights(const std::vector<float> &input) {
float shrinkFactor=_maxChange/(_maxChange+1.0);
for(std::size_t layerIndex=1;layerIndex<network.size();layerIndex++) { for(std::size_t layerIndex=1;layerIndex<network.size();layerIndex++) {
auto &layer=network[layerIndex]; auto &layer=network[layerIndex];
auto &prevLayer=network[layerIndex-1]; auto &prevLayer=network[layerIndex-1];
@@ -14,13 +16,22 @@ void NeuralNetwork::Learning::QuickPropagation::updateWeights(const std::vector<
for(std::size_t j=1;j<layerSize;j++) { for(std::size_t j=1;j<layerSize;j++) {
float newChange=slopes[layerIndex][j]/(previousSlopes[layerIndex][j]-slopes[layerIndex][j]) * lastWeightChange[layerIndex][j]; float newChange=0;
// according to original paper if(fabs (lastWeightChange[layerIndex][j])> 0.0001) {
newChange+= slopes[layerIndex][j]*_epsilon; if(std::signbit(lastWeightChange[layerIndex][j]) == std::signbit(slopes[layerIndex][j])) {
newChange+= slopes[layerIndex][j]*_epsilon;
if(newChange > lastWeightChange[layerIndex][j]*_maxChange) { if(fabs(slopes[layerIndex][j]) > fabs(shrinkFactor * previousSlopes[layerIndex][j])) {
newChange=lastWeightChange[layerIndex][j]; newChange += _maxChange * lastWeightChange[layerIndex][j];
}else {
newChange+=slopes[layerIndex][j]/(previousSlopes[layerIndex][j]-slopes[layerIndex][j]) * lastWeightChange[layerIndex][j];
}
} else {
newChange+=slopes[layerIndex][j]/(previousSlopes[layerIndex][j]-slopes[layerIndex][j]) * lastWeightChange[layerIndex][j];
}
} else {
newChange+= slopes[layerIndex][j]*_epsilon;
} }
weightChange[layerIndex][j]=newChange; weightChange[layerIndex][j]=newChange;