moving attributes to NeuronInterface for speed
@@ -4,7 +4,7 @@ void NeuralNetwork::FeedForward::Layer::solve(const std::vector<float> &input, s
     output.resize(neurons.size());

     for(auto&neuron: neurons) {
-        output[neuron->id()] = neuron->operator()(input);
+        output[neuron->id] = neuron->operator()(input);
     }
 }
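Note: the NeuronInterface declaration change the commit title refers to is not part of the hunks shown here. A minimal sketch of what it implies, assuming id was previously a virtual accessor (all names below are illustrative, not the repo's actual declarations):

// Illustrative sketch only: id as a plain public member instead of a
// virtual accessor, so hot loops such as Layer::solve above can index the
// output vector without paying a virtual call per neuron.
#include <cstddef>
#include <vector>

class NeuronInterface {
public:
    std::size_t id = 0;  // was (assumed): virtual std::size_t id() const = 0;
    virtual float operator()(const std::vector<float> &input) = 0;
    virtual ~NeuronInterface() = default;
};

A plain member is read through a base-class pointer without dynamic dispatch, which is presumably the "for speed" part of the commit message.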
@@ -1,6 +1,7 @@
 #include <NeuralNetwork/Learning/BackPropagation.h>

 #include <cassert>
+#include <immintrin.h>

 void NeuralNetwork::Learning::BackPropagation::teach(FeedForward::Network &network,const std::vector<float> &input, const std::vector<float> &expectation) {
@@ -26,11 +27,12 @@ void NeuralNetwork::Learning::BackPropagation::teach(FeedForward::Network &netwo

         for(std::size_t j=1;j<layer.size();j++) {
             float deltasWeight = 0;

             for(std::size_t k=1;k<network[layerIndex+1].size();k++) {
                 deltasWeight+=deltas[layerIndex+1][k]* network[layerIndex+1][k].weight(j);
             }
-            deltas[layerIndex][j]=deltasWeight*layer[j].getActivationFunction().derivatedOutput(layer[j].value(),layer[j].output());
+            float newDelta=deltasWeight*layer[j].getActivationFunction().derivatedOutput(layer[j].value(),layer[j].output());
+            deltas[layerIndex][j]=newDelta;
         }
     }
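Note: both the removed and the added form of this hunk compute the standard hidden-layer backpropagation delta. In the code's names, with $f$ the neuron's activation function,

$$\delta^{(l)}_j = \Big(\sum_k \delta^{(l+1)}_k\, w^{(l+1)}_{kj}\Big) \cdot f'\!\big(\mathrm{value}_j,\ \mathrm{output}_j\big)$$

where the inner sum is deltasWeight and derivatedOutput evaluates $f'$ from the neuron's stored pre-activation value and output; the new code merely names the result newDelta before storing it.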
@@ -38,12 +40,33 @@ void NeuralNetwork::Learning::BackPropagation::teach(FeedForward::Network &netwo
         auto &layer=network[layerIndex];
         auto &prevLayer=network[layerIndex-1];

-        std::size_t max=prevLayer.size();
+        std::size_t prevLayerSize=prevLayer.size();
+        std::size_t layerSize=layer.size();

-        for(std::size_t j=1;j<layer.size();j++) {
+        /*
+#ifdef USE_AVX
+
+        std::size_t alignedPrev=layerSize-layerSize%8;
+
+        __m256 learningCoefficientAVX=_mm256_set_ps(learningCoefficient, learningCoefficient, learningCoefficient, learningCoefficient,
+                                                    learningCoefficient, learningCoefficient, learningCoefficient, learningCoefficient);
+
+        float* tmp =deltas[layerIndex].data();
+        for(std::size_t j=1;j<alignedPrev;j+=8) {
+            _mm256_storeu_ps(tmp+j,_mm256_mul_ps(learningCoefficientAVX,_mm256_loadu_ps(tmp+j)));
+        }
+        for(std::size_t j =alignedPrev; j < layerSize;j++) {
+            deltas[layerIndex][j]*=learningCoefficient;
+        }
+#endif
+        */
+        for(std::size_t j=1;j<layerSize;j++) {
             deltas[layerIndex][j]*=learningCoefficient;

             layer[j].weight(0)+=deltas[layerIndex][j];
-            for(std::size_t k=1;k<max;k++) {
+            for(std::size_t k=1;k<prevLayerSize;k++) {
                 if(layerIndex==1) {
                     layer[j].weight(k)+=deltas[layerIndex][j]*input[k-1];
                 } else {
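Note: the USE_AVX block is committed commented out, and as written it has two boundary problems: the 8-wide loop starts at j=1 while alignedPrev is computed from index 0, so when layerSize%8==0 the last _mm256_storeu_ps writes one float past the end of deltas[layerIndex], and for any other remainder the element at index alignedPrev is scaled twice (once by the vector pass, once by the scalar remainder loop). A standalone corrected sketch of the same idea (function name and signature are assumptions, not the repo's API; _mm256_set1_ps replaces the eight-argument _mm256_set_ps):

#include <immintrin.h>
#include <cstddef>

// Scale deltas[1..layerSize) by learningCoefficient, 8 floats at a time.
// Index 0 is skipped, matching the j=1 loops in teach().
void scaleDeltas(float *deltas, std::size_t layerSize, float learningCoefficient) {
    const __m256 coeff = _mm256_set1_ps(learningCoefficient);
    std::size_t j = 1;
    for (; j + 8 <= layerSize; j += 8) {  // full 8-wide blocks, never past the end
        _mm256_storeu_ps(deltas + j, _mm256_mul_ps(coeff, _mm256_loadu_ps(deltas + j)));
    }
    for (; j < layerSize; j++) {          // scalar remainder, no overlap
        deltas[j] *= learningCoefficient;
    }
}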
@@ -51,5 +74,6 @@ void NeuralNetwork::Learning::BackPropagation::teach(FeedForward::Network &netwo
                     }
                 }
             }
         }
     }
 }
@@ -6,7 +6,7 @@ std::string NeuralNetwork::Neuron::stringify(const std::string &prefix) const {

     out << prefix << "{\n";
     out << prefix << "\t\"class\": \"NeuralNetwork::Neuron\",\n";
-    out << prefix << "\t\"id\": " << id() << ",\n";
+    out << prefix << "\t\"id\": " << id << ",\n";
     out << prefix << "\t\"bias\": " << getBias() << ",\n";
     out << prefix << "\t\"output\": " << output() << ",\n";
     out << prefix << "\t\"value\": " << value() << ",\n";
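Note: the JSON fragment emitted by stringify is unchanged by this hunk; for an empty prefix it looks roughly like the following (field values are illustrative, and the fields after "value" are outside this diff):

{
	"class": "NeuralNetwork::Neuron",
	"id": 4,
	"bias": 1,
	"output": 0.731059,
	"value": 0.995,
	...
}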
@@ -6,7 +6,7 @@ std::vector<float> NeuralNetwork::Recurrent::Network::computeOutput(const std::v
     if(outputs.size() != neurons.size()) {
         outputs.resize(neurons.size());
         for(auto &neuron:neurons) {
-            outputs[neuron->id()]=neuron->output();
+            outputs[neuron->id]=neuron->output();
         }
     }