learning: naming in BackPropagation changed (deltas -> slopes) and QuickPropagation weight update reworked
@@ -9,30 +9,12 @@ void NeuralNetwork::Learning::BackPropagation::teach(const std::vector<float> &input, const std::vector<float> &expectation) {

     resize();

-    auto& outputLayer=network[network.size()-1];
-    for(std::size_t j=1;j<outputLayer.size();j++) {
-        auto& neuron = outputLayer[j];
-        deltas[network.size()-1][j]=correctionFunction->operator()(expectation[j-1], neuron.output())*
-                neuron.getActivationFunction().derivatedOutput(neuron.value(),neuron.output());
-    }
-
-    for(int layerIndex=static_cast<int>(network.size()-2);layerIndex>0;layerIndex--) {
-        auto &layer=network[layerIndex];
-
-        for(std::size_t j=1;j<layer.size();j++) {
-            float deltasWeight = 0;
-
-            for(std::size_t k=1;k<network[layerIndex+1].size();k++) {
-                deltasWeight+=deltas[layerIndex+1][k]*network[layerIndex+1][k].weight(j);
-            }
-
-            deltas[layerIndex][j]=deltasWeight*layer[j].getActivationFunction().derivatedOutput(layer[j].value(),layer[j].output());
-        }
-    }
+    computeDeltas(expectation);

     updateWeights(input);
 }


 void NeuralNetwork::Learning::BackPropagation::updateWeights(const std::vector<float> &input) {

     for(std::size_t layerIndex=1;layerIndex<network.size();layerIndex++) {
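For reference, the block removed above reappears below as computeDeltas(). The recurrence it implements is slope[l][j] = f'(value_j, output_j) * sum_k slope[l+1][k] * weight_(k<-j). A standalone sketch of that hidden-layer step on plain vectors (the names here are illustrative, not the repository's classes):

```cpp
#include <cstddef>
#include <vector>

// Sketch of the hidden-layer slope recurrence used by computeDeltas() below,
// written against plain vectors instead of the repository's neuron classes.
// nextWeights[k][j] is the weight from neuron j into neuron k of the next layer.
std::vector<float> hiddenSlopes(const std::vector<float> &nextSlopes,
                                const std::vector<std::vector<float>> &nextWeights,
                                const std::vector<float> &derivatives) {
    std::vector<float> slopes(derivatives.size(), 0.0f);
    for (std::size_t j = 0; j < slopes.size(); j++) {
        float sum = 0.0f;
        for (std::size_t k = 0; k < nextSlopes.size(); k++) {
            sum += nextSlopes[k] * nextWeights[k][j]; // accumulate downstream slopes
        }
        slopes[j] = sum * derivatives[j]; // scale by the activation derivative
    }
    return slopes;
}
```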
@@ -44,18 +26,43 @@ void NeuralNetwork::Learning::BackPropagation::updateWeights(const std::vector<float> &input) {

         for(std::size_t j=1;j<layerSize;j++) {

-            deltas[layerIndex][j]*=learningCoefficient;
+            float delta = slopes[layerIndex][j]*learningCoefficient;

-            layer[j].weight(0)+=deltas[layerIndex][j];
+            layer[j].weight(0)+=delta;

             for(std::size_t k=1;k<prevLayerSize;k++) {
                 if(layerIndex==1) {
-                    layer[j].weight(k)+=deltas[layerIndex][j]*input[k-1];
+                    layer[j].weight(k)+=delta*input[k-1];
                 } else {
-                    layer[j].weight(k)+=deltas[layerIndex][j]*prevLayer[k].output();
+                    layer[j].weight(k)+=delta*prevLayer[k].output();
                 }
             }
         }
     }
 }

+void NeuralNetwork::Learning::BackPropagation::computeDeltas(const std::vector<float> &expectation) {
+    auto& outputLayer=network[network.size()-1];
+    for(std::size_t j=1;j<outputLayer.size();j++) {
+        auto& neuron = outputLayer[j];
+        slopes[network.size()-1][j]=correctionFunction->operator()(expectation[j-1], neuron.output())*
+                neuron.getActivationFunction().derivatedOutput(neuron.value(),neuron.output());
+    }
+
+    for(int layerIndex=static_cast<int>(network.size()-2);layerIndex>0;layerIndex--) {
+        auto &layer=network[layerIndex];
+
+        for(std::size_t j=1;j<layer.size();j++) {
+            float deltasWeight = 0;
+
+            for(std::size_t k=1;k<network[layerIndex+1].size();k++) {
+                deltasWeight+=slopes[layerIndex+1][k]*network[layerIndex+1][k].weight(j);
+            }
+
+            slopes[layerIndex][j]=deltasWeight*layer[j].getActivationFunction().derivatedOutput(layer[j].value(),layer[j].output());
+        }
+    }
+}
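To make the two phases concrete, here is a self-contained single-neuron run of the same rules: slope = error'(target, output) * activation'(value, output), then bias += eta*slope and w_k += eta*slope*x_k. The sigmoid and squared-error derivative are assumptions for this example; in the repository, correctionFunction and the activation function are pluggable.

```cpp
#include <cmath>
#include <cstdio>

// One hand-run training step for a single sigmoid neuron, mirroring
// computeDeltas() (output-layer slope) and updateWeights() (gradient step).
// The sigmoid and the (target - out) error derivative are assumptions here.
int main() {
    float x = 0.5f, target = 1.0f;
    float w = 0.2f, bias = 0.1f;
    float learningCoefficient = 0.7f;

    float value = w * x + bias;                    // neuron.value()
    float out = 1.0f / (1.0f + std::exp(-value)); // neuron.output()

    float slope = (target - out)       // d(error)/d(out), sign folded in
                * out * (1.0f - out);  // sigmoid derivative from the output

    float delta = slope * learningCoefficient;
    bias += delta;                     // weight(0) holds the bias in the diff
    w += delta * x;                    // weight(k) is scaled by its input

    std::printf("out=%.4f slope=%.4f -> w=%.4f bias=%.4f\n", out, slope, w, bias);
    return 0;
}
```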
@@ -14,22 +14,29 @@ void NeuralNetwork::Learning::QuickPropagation::updateWeights(const std::vector<float> &input) {

         for(std::size_t j=1;j<layerSize;j++) {

-            //TODO: is this correct??
-            float delta=deltas[layerIndex][j]/(deltasPrev[layerIndex][j]-deltas[layerIndex][j]);
+            float newChange=slopes[layerIndex][j]/(previousSlopes[layerIndex][j]-slopes[layerIndex][j]) * lastWeightChange[layerIndex][j];

-            deltas[layerIndex][j]=delta;
+            // according to original paper
+            newChange+=slopes[layerIndex][j]*_epsilon;

-            layer[j].weight(0)+=delta;
+            if(newChange > lastWeightChange[layerIndex][j]*_maxChange) {
+                newChange=lastWeightChange[layerIndex][j];
+            }
+
+            weightChange[layerIndex][j]=newChange;
+
+            layer[j].weight(0)+=newChange;

             for(std::size_t k=1;k<prevLayerSize;k++) {
                 if(layerIndex==1) {
-                    layer[j].weight(k)+=delta*input[k-1];
+                    layer[j].weight(k)+=newChange*(input[k-1]);
                 } else {
-                    layer[j].weight(k)+=delta*prevLayer[k].output();
+                    layer[j].weight(k)+=newChange*(prevLayer[k].output());
                 }
             }
         }
     }

-    deltas.swap(deltasPrev);
+    slopes.swap(previousSlopes);
+    weightChange.swap(lastWeightChange);
 }
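The rewritten QuickPropagation step matches Fahlman's quickprop rule, dw(t) = S(t) / (S(t-1) - S(t)) * dw(t-1) + epsilon * S(t), with the step clamped when it grows more than _maxChange times the previous one. A scalar sketch of that per-weight update (function and parameter names are illustrative, not the repository's API):

```cpp
// Scalar sketch of the quickprop step applied per weight above. slope and
// prevSlope are the current and previous slopes for this weight, prevChange
// the last applied change; mu plays the role of _maxChange, eps of _epsilon.
float quickpropStep(float slope, float prevSlope, float prevChange,
                    float mu, float eps) {
    // Jump toward the minimum of the parabola fitted through the two slopes.
    float change = slope / (prevSlope - slope) * prevChange;
    // Plain gradient term, per the "according to original paper" comment.
    change += slope * eps;
    // Clamp runaway growth; like the diff, fall back to the previous change.
    if (change > prevChange * mu) {
        change = prevChange;
    }
    return change;
}
```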