modified learning algorithms: BackPropagation and OpticalBackPropagation now bind to a FeedForward::Network at construction and drop the network parameter from teach()

2016-02-24 17:30:18 +01:00
parent bf4387a98f
commit 47de0fa08b
8 changed files with 58 additions and 65 deletions

View File

@@ -13,13 +13,14 @@ namespace Learning {
 	class BackPropagation {
 	public:
-		BackPropagation(): learningCoefficient(0.4) {
+		inline BackPropagation(FeedForward::Network &feedForwardNetwork): network(feedForwardNetwork), learningCoefficient(0.4), deltas() {
+			resize();
 		}
 		virtual ~BackPropagation() {
 		}
-		void teach(FeedForward::Network &n,const std::vector<float> &input, const std::vector<float> &output);
+		void teach(const std::vector<float> &input, const std::vector<float> &output);
 		inline virtual void setLearningCoefficient (const float& coefficient) { learningCoefficient=coefficient; }
@@ -27,7 +28,22 @@ namespace Learning {
 		inline virtual float correction(const float & expected, const float &computed) const {
 			return expected-computed;
 		};
+		inline void resize() {
+			if(deltas.size()!=network.size())
+				deltas.resize(network.size());
+			for(std::size_t i=0; i < network.size(); i++) {
+				if(deltas[i].size()!=network[i].size())
+					deltas[i].resize(network[i].size());
+			}
+		}
+		FeedForward::Network &network;
 		float learningCoefficient;
+		std::vector<std::vector<float>> deltas;
 	};
 }
 }
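In effect, a trainer is now constructed over the network it will teach, and teach() takes only the training sample. A minimal usage sketch of the new interface, mirroring the updated tests later in this commit; it assumes an already-built FeedForward::Network n:

	// trainer bound to n once, at construction
	NeuralNetwork::Learning::BackPropagation prop(n);
	for(int i=0;i<10000;i++) {
		prop.teach({1,0},{1}); // teach() now takes only input and expected output
		prop.teach({0,1},{1});
		prop.teach({1,1},{0});
		prop.teach({0,0},{0});
	}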

View File

@@ -11,7 +11,7 @@ namespace Learning {
 	class OpticalBackPropagation : public BackPropagation {
 	public:
-		OpticalBackPropagation(): BackPropagation() {
+		OpticalBackPropagation(FeedForward::Network &feedForwardNetwork): BackPropagation(feedForwardNetwork) {
 		}
 		virtual ~OpticalBackPropagation() {
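Since the error term goes through the virtual correction() hook declared in the header above, other variants can be derived the same way. A sketch with an illustrative cubic error term, not the actual OpticalBackPropagation formula:

	class CubicBackPropagation : public NeuralNetwork::Learning::BackPropagation {
	public:
		CubicBackPropagation(NeuralNetwork::FeedForward::Network &n): BackPropagation(n) {
		}
		inline virtual float correction(const float &expected, const float &computed) const override {
			float e = expected - computed;
			return e * e * e; // illustrative only: preserves sign, reshapes magnitude
		}
	};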

View File

@@ -21,6 +21,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>
 #ifndef USE_FMA
 	__m256 tmp;
 #endif
 	for(size_t k=0;k<alignedPrev;k+=8) {
+		//TODO: assignment!! -- possible speedup
 #ifdef USE_FMA

View File

@@ -3,17 +3,11 @@
 #include <cassert>
 #include <immintrin.h>
-void NeuralNetwork::Learning::BackPropagation::teach(FeedForward::Network &network,const std::vector<float> &input, const std::vector<float> &expectation) {
+void NeuralNetwork::Learning::BackPropagation::teach(const std::vector<float> &input, const std::vector<float> &expectation) {
 	network.computeOutput(input);
-	std::vector<std::vector<float>> deltas;
-	deltas.resize(network.size());
-	for(std::size_t i=0; i < network.size(); i++) {
-		deltas[i].resize(network[i].size());
-		deltas[i][0]=0.0;
-	}
+	resize();
 	auto& outputLayer=network[network.size()-1];
 	for(std::size_t j=1;j<outputLayer.size();j++) {
@@ -43,23 +37,6 @@ void NeuralNetwork::Learning::BackPropagation::teach(FeedForward::Network &netwo
 	std::size_t prevLayerSize=prevLayer.size();
 	std::size_t layerSize=layer.size();
-/*
-#ifdef USE_AVX
-	std::size_t alignedPrev=layerSize-layerSize%8;
-	__m256 learningCoefficientAVX=_mm256_set_ps(learningCoefficient, learningCoefficient, learningCoefficient, learningCoefficient,
-		learningCoefficient, learningCoefficient, learningCoefficient, learningCoefficient);
-	float* tmp =deltas[layerIndex].data();
-	for(std::size_t j=1;j<alignedPrev;j+=8) {
-		_mm256_storeu_ps(tmp+j,_mm256_mul_ps(learningCoefficientAVX,_mm256_loadu_ps(tmp+j)));
-	}
-	for(std::size_t j =alignedPrev; j < layerSize;j++) {
-		deltas[layerIndex][j]*=learningCoefficient;
-	}
-#endif
-	*/
 	for(std::size_t j=1;j<layerSize;j++) {
 		deltas[layerIndex][j]*=learningCoefficient;
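Worth noting about the deleted AVX block: it started the vector loop at j=1 but bounded it with j<alignedPrev, so the last 8-wide store could write one element past the end whenever layerSize is a multiple of 8. If the path is ever revived, a bounds-safe sketch (a hypothetical helper, not part of this commit):

	#include <immintrin.h>
	#include <cstddef>

	// Scale deltas[1..layerSize) by the learning coefficient, 8 floats at a time.
	inline void scaleDeltas(float *deltas, std::size_t layerSize, float learningCoefficient) {
		__m256 coeff = _mm256_set1_ps(learningCoefficient); // broadcast, instead of the 8-argument _mm256_set_ps
		std::size_t j = 1; // index 0 is skipped here too, matching the scalar loop
		for(; j + 8 <= layerSize; j += 8) { // unaligned load/store, so starting at 1 is fine
			_mm256_storeu_ps(deltas + j, _mm256_mul_ps(coeff, _mm256_loadu_ps(deltas + j)));
		}
		for(; j < layerSize; j++) { // scalar tail
			deltas[j] *= learningCoefficient;
		}
	}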

View File

@@ -13,12 +13,12 @@ int main() {
 	n.randomizeWeights();
-	NeuralNetwork::Learning::BackPropagation prop;
+	NeuralNetwork::Learning::BackPropagation prop(n);
 	for(int i=0;i<10000;i++) {
-		prop.teach(n,{1,0},{1});
-		prop.teach(n,{1,1},{0});
-		prop.teach(n,{0,0},{0});
-		prop.teach(n,{0,1},{1});
+		prop.teach({1,0},{1});
+		prop.teach({1,1},{0});
+		prop.teach({0,0},{0});
+		prop.teach({0,1},{1});
 	}
 	{
{
@@ -49,12 +49,12 @@ int main() {
 	n.randomizeWeights();
-	NeuralNetwork::Learning::BackPropagation prop;
+	NeuralNetwork::Learning::BackPropagation prop(n);
 	for(int i=0;i<10000;i++) {
-		prop.teach(n,{1,1},{1});
-		prop.teach(n,{0,0},{0});
-		prop.teach(n,{0,1},{0});
-		prop.teach(n,{1,0},{0});
+		prop.teach({1,1},{1});
+		prop.teach({0,0},{0});
+		prop.teach({0,1},{0});
+		prop.teach({1,0},{0});
 	}
 	{
@@ -85,12 +85,12 @@ int main() {
 	n.randomizeWeights();
-	NeuralNetwork::Learning::BackPropagation prop;
+	NeuralNetwork::Learning::BackPropagation prop(n);
 	for(int i=0;i<10000;i++) {
-		prop.teach(n,{1,1},{0});
-		prop.teach(n,{0,0},{1});
-		prop.teach(n,{0,1},{1});
-		prop.teach(n,{1,0},{1});
+		prop.teach({1,1},{0});
+		prop.teach({0,0},{1});
+		prop.teach({0,1},{1});
+		prop.teach({1,0},{1});
 	}
 	{

View File

@@ -15,12 +15,12 @@ int main() {
 	n.randomizeWeights();
-	NeuralNetwork::Learning::BackPropagation prop;
+	NeuralNetwork::Learning::BackPropagation prop(n);
 	for(int i=0;i<100;i++) {
-		prop.teach(n,{1,0},{1});
-		prop.teach(n,{1,1},{0});
-		prop.teach(n,{0,0},{0});
-		prop.teach(n,{0,1},{1});
+		prop.teach({1,0},{1});
+		prop.teach({1,1},{0});
+		prop.teach({0,0},{0});
+		prop.teach({0,1},{1});
 	}
 	}
 }
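This run uses only 100 iterations and asserts nothing. If a convergence check is wanted, something along these lines could follow the loop (hypothetical thresholds, in the spirit of the tolerance assert in the last file of this commit; whether 100 iterations suffice depends on the random initialization):

	assert(n.computeOutput({1,0})[0] > 0.9f); // XOR: expect high output
	assert(n.computeOutput({0,1})[0] > 0.9f);
	assert(n.computeOutput({1,1})[0] < 0.1f); // expect low output
	assert(n.computeOutput({0,0})[0] < 0.1f);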

View File

@@ -13,12 +13,12 @@ int main() {
 	n.randomizeWeights();
-	NeuralNetwork::Learning::OpticalBackPropagation prop;
+	NeuralNetwork::Learning::OpticalBackPropagation prop(n);
 	for(int i=0;i<10000;i++) {
-		prop.teach(n,{1,0},{1});
-		prop.teach(n,{1,1},{0});
-		prop.teach(n,{0,0},{0});
-		prop.teach(n,{0,1},{1});
+		prop.teach({1,0},{1});
+		prop.teach({1,1},{0});
+		prop.teach({0,0},{0});
+		prop.teach({0,1},{1});
 	}
 	{
@@ -49,12 +49,12 @@ int main() {
 	n.randomizeWeights();
-	NeuralNetwork::Learning::OpticalBackPropagation prop;
+	NeuralNetwork::Learning::OpticalBackPropagation prop(n);
 	for(int i=0;i<10000;i++) {
-		prop.teach(n,{1,1},{1});
-		prop.teach(n,{0,0},{0});
-		prop.teach(n,{0,1},{0});
-		prop.teach(n,{1,0},{0});
+		prop.teach({1,1},{1});
+		prop.teach({0,0},{0});
+		prop.teach({0,1},{0});
+		prop.teach({1,0},{0});
 	}
 	{
@@ -85,12 +85,12 @@ int main() {
 	n.randomizeWeights();
-	NeuralNetwork::Learning::OpticalBackPropagation prop;
+	NeuralNetwork::Learning::OpticalBackPropagation prop(n);
 	for(int i=0;i<10000;i++) {
-		prop.teach(n,{1,1},{0});
-		prop.teach(n,{0,0},{1});
-		prop.teach(n,{0,1},{1});
-		prop.teach(n,{1,0},{1});
+		prop.teach({1,1},{0});
+		prop.teach({0,0},{1});
+		prop.teach({0,1},{1});
+		prop.teach({1,0},{1});
 	}
 	{

View File

@@ -15,7 +15,6 @@ int main() {
 	for(size_t i=0;i<solutions.size();i++) {
 		float res= a.computeOutput({1,0.7})[0];
 		float resA=solutions[i];
 		assert(res > solutions[i]*0.999 && res < solutions[i]*1.001);
 	}
 }
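In the hunk above, resA is assigned but the assert reads solutions[i] directly; if the temporary is kept, the check might as well go through it. A cleaned-up sketch of the same ±0.1% relative-tolerance check:

	float resA = solutions[i];
	assert(res > resA*0.999 && res < resA*1.001); // same tolerance, via the temporary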