modified learning algorithms

This commit is contained in:
2016-02-24 17:30:18 +01:00
parent bf4387a98f
commit 47de0fa08b
8 changed files with 58 additions and 65 deletions

View File

@@ -21,6 +21,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>
#ifndef USE_FMA
__m256 tmp;
#endif
for(size_t k=0;k<alignedPrev;k+=8) {
//TODO: assignment!! -- possible speedup
#ifdef USE_FMA

View File

@@ -3,17 +3,11 @@
#include <cassert>
#include <immintrin.h>
void NeuralNetwork::Learning::BackPropagation::teach(FeedForward::Network &network,const std::vector<float> &input, const std::vector<float> &expectation) {
void NeuralNetwork::Learning::BackPropagation::teach(const std::vector<float> &input, const std::vector<float> &expectation) {
network.computeOutput(input);
std::vector<std::vector<float>> deltas;
deltas.resize(network.size());
for(std::size_t i=0; i < network.size(); i++) {
deltas[i].resize(network[i].size());
deltas[i][0]=0.0;
}
resize();
auto& outputLayer=network[network.size()-1];
for(std::size_t j=1;j<outputLayer.size();j++) {
@@ -43,23 +37,6 @@ void NeuralNetwork::Learning::BackPropagation::teach(FeedForward::Network &netwo
std::size_t prevLayerSize=prevLayer.size();
std::size_t layerSize=layer.size();
/*
#ifdef USE_AVX
std::size_t alignedPrev=layerSize-layerSize%8;
__m256 learningCoefficientAVX=_mm256_set_ps(learningCoefficient, learningCoefficient, learningCoefficient, learningCoefficient,
learningCoefficient, learningCoefficient, learningCoefficient, learningCoefficient);
float* tmp =deltas[layerIndex].data();
for(std::size_t j=1;j<alignedPrev;j+=8) {
_mm256_storeu_ps(tmp+j,_mm256_mul_ps(learningCoefficientAVX,_mm256_loadu_ps(tmp+j)));
}
for(std::size_t j =alignedPrev; j < layerSize;j++) {
deltas[layerIndex][j]*=learningCoefficient;
}
#endif
*/
for(std::size_t j=1;j<layerSize;j++) {
deltas[layerIndex][j]*=learningCoefficient;