#include "BatchPropagation.h" // assumed header: the original include target was lost

#include <algorithm> // std::fill
#include <vector>

// NOTE: the element type of the vectors was lost in the original text;
// float is assumed throughout.

// Runs one training sample through the network, accumulates its gradient
// contribution, and finishes the batch once _batchSize samples have been seen.
void NeuralNetwork::Learning::BatchPropagation::teach(const std::vector<float> &input, const std::vector<float> &expectation) {
    _network.computeOutput(input);

    if (!init) {
        resize();
        init = true;
    }

    computeSlopes(expectation);
    computeDeltas(input);

    if (++_currentBatchSize >= _batchSize) {
        finishTeaching();
    }
}

void NeuralNetwork::Learning::BatchPropagation::finishTeaching() {
    updateWeightsAndEndBatch();
    _currentBatchSize = 0;
}

// Backward pass: computes the slope (delta) of every neuron, starting at the
// output layer and propagating towards the input. Loops start at index 1
// because index 0 of each layer is presumably the bias neuron.
void NeuralNetwork::Learning::BatchPropagation::computeSlopes(const std::vector<float> &expectation) {
    const auto &outputLayer = _network[_network.size() - 1];

    // Output layer: slope = dE/do * f'(net), per neuron.
    for (std::size_t j = 1; j < outputLayer.size(); j++) {
        const auto &neuron = outputLayer[j];
        _slopes[_network.size() - 1][j] = _errorComputer->operator()(expectation[j - 1], neuron.output())
            * neuron.getActivationFunction().derivatedOutput(neuron.value(), neuron.output());
    }

    // Hidden layers: slope = (sum of next-layer slopes weighted by the
    // connecting weights) * f'(net). The loop body was lost in the original
    // text and is reconstructed from standard backpropagation; the weight
    // accessor `weight(j)` is an assumption.
    for (int layerIndex = static_cast<int>(_network.size() - 2); layerIndex > 0; layerIndex--) {
        auto &layer = _network[layerIndex];
        const auto &nextLayer = _network[layerIndex + 1];

        for (std::size_t j = 1; j < layer.size(); j++) {
            float sum = 0.0f;
            for (std::size_t k = 1; k < nextLayer.size(); k++) {
                sum += nextLayer[k].weight(j) * _slopes[layerIndex + 1][k];
            }
            const auto &neuron = layer[j];
            _slopes[layerIndex][j] = sum
                * neuron.getActivationFunction().derivatedOutput(neuron.value(), neuron.output());
        }
    }
}

// Accumulates each weight's gradient for the current sample:
// gradient[l][j][k] += slope[l][j] * output of neuron k in layer l-1.
void NeuralNetwork::Learning::BatchPropagation::computeDeltas(const std::vector<float> &input) {
    for (std::size_t layerIndex = 1; layerIndex < _network.size(); layerIndex++) {
        auto &layer = _network[layerIndex];
        auto &prevLayer = _network[layerIndex - 1];
        std::size_t prevLayerSize = prevLayer.size();
        std::size_t layerSize = layer.size();

        for (std::size_t j = 1; j < layerSize; j++) {
            // Reconstructed body: the accumulation loop was lost in the
            // original text. Weight k of neuron j is assumed to connect to
            // neuron k of the previous layer (k == 0 being the bias); the
            // `input` parameter is kept from the surviving signature, but this
            // reconstruction reads the previous layer's stored outputs instead.
            for (std::size_t k = 0; k < prevLayerSize; k++) {
                _gradients[layerIndex][j][k] += _slopes[layerIndex][j] * prevLayer[k].output();
            }
        }
    }
}

// Sizes the slope and gradient buffers to match the network topology. The
// function header and opening statements were lost in the original text and
// are reconstructed to be consistent with the surviving body.
void NeuralNetwork::Learning::BatchPropagation::resize() {
    _slopes.resize(_network.size());
    _gradients.resize(_network.size());

    for (std::size_t i = 0; i < _network.size(); i++) {
        _slopes[i].resize(_network[i].size());
        _gradients[i].resize(_network[i].size());

        if (i > 0) {
            for (std::size_t j = 0; j < _gradients[i].size(); j++) {
                _gradients[i][j].resize(_network[i - 1].size());
                std::fill(_gradients[i][j].begin(), _gradients[i][j].end(), 0.0);
            }
        }
    }
}
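
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the library): how a caller might
// drive this batch learner. The Network constructor shown and the names
// `trainingSet`, `sample.input`, and `sample.expectation` are assumptions;
// only teach() and finishTeaching() are confirmed by this file.
//
//     NeuralNetwork::Network network({2, 3, 1});
//     NeuralNetwork::Learning::BatchPropagation learner(network);
//     for (const auto &sample : trainingSet) {
//         learner.teach(sample.input, sample.expectation); // accumulates gradients
//     }
//     learner.finishTeaching(); // flush a final, partially filled batch
//
// teach() calls finishTeaching() automatically after every _batchSize samples,
// so the explicit call above only handles the remainder of the last batch.
// ---------------------------------------------------------------------------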