new test, and multithreading in FFQ, MT in BP not working

2014-11-12 22:26:38 +01:00
parent f81b5a5b5f
commit 24a72dbffb
10 changed files with 194 additions and 45 deletions

View File

@@ -1,4 +1,5 @@
 #include "FeedForwardQuick"
+#include <thread>
 
 using namespace Shin::NeuronNetwork;
@@ -72,18 +73,55 @@ Solution FeedForwardNetworkQuick::solve(const Problem& p)
     for(register size_t i=0;i<layers;i++)
     {
         double* newSolution= sums[i+1];//new bool[layerSizes[i]];
-        for( size_t j=1;j<layerSizes[i];j++)
+        if(threads > 1 && layerSizes[i] > 600)
         {
-            newSolution[j]=sol[0]*weights[i][j][0];
-            register size_t k;
-            for(k=1;k<prevSize;k++)
+            std::vector<std::thread> th;
+            size_t s=1;
+            //TODO THIS IS NOT WORKING!!!
+            size_t step =layerSizes[i]/threads;
+            for(size_t t=1;t<=threads;t++)
             {
-                if(i==0)
+                //TODO do i need it to check?
+                if(s>=layerSizes[i])
+                    break;
+                th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{
+                    for( size_t j=from;j<to;j++)
+                    {
+                        newSolution[j]=sol[0]*weights[i][j][0];
+                        register size_t k;
+                        for(k=1;k<prevSize;k++)
+                        {
+                            if(i==0)
+                            {
+                                newSolution[j]+=sol[k]*weights[i][j][k];
+                            }else
+                            {
+                                newSolution[j]+=(1.0/(1.0+exp(-lambda*sol[k])))*weights[i][j][k];
+                            }
+                        }
+                    }
+                },s,t==threads?layerSizes[i]:s+step));//{}
+                s+=step;
+            }
+            for (auto& thr : th)
+                thr.join();
+        }else
+        {
+            for( size_t j=1;j<layerSizes[i];j++)
+            {
+                newSolution[j]=sol[0]*weights[i][j][0];
+                register size_t k;
+                for(k=1;k<prevSize;k++)
                 {
-                    newSolution[j]+=sol[k]*weights[i][j][k];
-                }else
-                {
-                    newSolution[j]+=(1.0/(1.0+exp(-lambda*sol[k])))*weights[i][j][k];
+                    if(i==0)
+                    {
+                        newSolution[j]+=sol[k]*weights[i][j][k];
+                    }else
+                    {
+                        newSolution[j]+=(1.0/(1.0+exp(-lambda*sol[k])))*weights[i][j][k];
+                    }
                 }
             }
         }
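
A note on the threaded branch above: each worker handles rows [from, to) starting at s=1, with the last worker absorbing the remainder via t==threads?layerSizes[i]:s+step. Since the commit flags this path as not working, here is a minimal standalone sketch of the same partitioning scheme that can be verified in isolation; the names (n, T, hits) are hypothetical, not from the committed code:

// Standalone check of the chunk partitioning: split rows [1, n) across
// T workers, last worker takes the remainder, join, then verify that
// every row was computed exactly once.
#include <cassert>
#include <cstddef>
#include <thread>
#include <vector>

int main()
{
    const std::size_t n = 5000, T = 4;
    std::vector<int> hits(n, 0);
    std::vector<std::thread> th;
    std::size_t step = n / T, s = 1;
    for (std::size_t t = 1; t <= T; ++t)
    {
        if (s >= n)
            break;
        std::size_t to = (t == T) ? n : s + step;
        th.push_back(std::thread([&hits](std::size_t from, std::size_t to) {
            for (std::size_t j = from; j < to; ++j)
                hits[j]++;            // stands in for the per-row computation
        }, s, to));
        s += step;
    }
    for (auto &thr : th)
        thr.join();                   // all workers finish before we check
    for (std::size_t j = 1; j < n; ++j)
        assert(hits[j] == 1);         // each row handled exactly once
    return 0;
}

With this scheme the partition itself covers every row once, which suggests the breakage reported in the commit message lies elsewhere (e.g. in what the lambda captures), not in the index arithmetic.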

View File

@@ -30,15 +30,9 @@ namespace NeuronNetwork
         void setPotential(double p) { *potential=p;}
         double getWeight(unsigned int i ) { return weights[i];}
         void setWeight(unsigned int i,double p) { weights[i]=p; }
-        inline double output()
-        {
-            return 1.0/(1.0+(exp(-lambda*input())));
-            return input();
-//            register double tmp=;
-//            return NAN==tmp?0:tmp;
-/* > *potential? 1 :0;*/ }
-        inline double input() { return *sum; }
-        inline double derivatedOutput() { return lambda*output()*(1.0-output()); };
+        inline double output() const { return 1.0/(1.0+(exp(-lambda*input()))); }
+        inline double input() const { return *sum; }
+        inline double derivatedOutput() const { return lambda*output()*(1.0-output()); }
     protected:
         double *potential;
         double *weights;
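
The rewritten accessors make output() a plain logistic function, and derivatedOutput() relies on the identity that for s(x) = 1/(1+exp(-lambda*x)) the derivative is lambda*s(x)*(1-s(x)). A quick self-contained numeric check of that identity (a sketch; the names are illustrative):

// Compare the closed form lambda*s*(1-s) against a central difference.
#include <cassert>
#include <cmath>

int main()
{
    double lambda = 1.0, x = 0.3, h = 1e-6;
    auto sig = [&](double v) { return 1.0 / (1.0 + std::exp(-lambda * v)); };
    double numeric = (sig(x + h) - sig(x - h)) / (2 * h);   // finite difference
    double closed  = lambda * sig(x) * (1.0 - sig(x));      // derivatedOutput()'s formula
    assert(std::fabs(numeric - closed) < 1e-6);
    return 0;
}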
@@ -110,7 +104,7 @@ namespace NeuronNetwork
         virtual Solution solve(const Problem& p) override;
         unsigned size() { return layers;}
         FFLayer* operator[](int l);
-    protected:
+        void setThreads(unsigned t) {threads=t;}
     private:
         FFLayer **ffLayers;
@@ -122,6 +116,7 @@ namespace NeuronNetwork
         size_t *layerSizes;
         size_t layers;
         double lambda;
+        unsigned threads=1;
     };
 }
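
The new setThreads() hook is opt-in: with the default threads=1, solve() keeps its single-threaded path. Usage as exercised by the updated test later in this commit, where layers above the 600-neuron threshold are split across workers:

Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,5000,5000,5000,500,500,500,500});
q.setThreads(4);   // solve() now splits layers larger than 600 neurons across 4 threads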

View File

@@ -1,4 +1,5 @@
 #include "./BackPropagation"
+#include <thread>
 
 Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForwardNetworkQuick &n): Supervised(n)
 {
@@ -22,14 +23,43 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro
             }
         }else
         {
-            for(size_t j=1;j<network[i]->size();j++)
+            if(allowThreads)
             {
-                register double deltasWeight = 0;
-                for(size_t k=1;k<network[i+1]->size();k++)
+                std::vector<std::thread> th;
+                int s=0;
+                //TODO THIS IS NOT WORKING!!!
+                #define THREADS 4
+                int step =network[i]->size()/THREADS;
+                for(int t=1;t<=THREADS;t++)
                 {
-                    deltasWeight+=deltas[i+1][k]*network[i+1]->operator[](k)->getWeight(j);
+                    if(s>=network[i]->size())
+                        break;
+                    th.push_back(std::thread([&i,this,&deltas](size_t from, size_t to)->void{
+                        for(size_t j=from;j<to;j++)
+                        {
+                            register double deltasWeight = 0;
+                            for(size_t k=1;k<this->network[i+1]->size();k++)
+                            {
+                                deltasWeight+=deltas[i+1][k]*this->network[i+1]->operator[](k)->getWeight(j);
+                            }
+                            //deltas[i][j]*=this->network[i]->operator[](j)->derivatedOutput(); // WHY THE HELL IS SEQ here??
+                        }
+                    },s,t==THREADS?network[i]->size():s+step));//{}
+                    s+=step;
                 }
+                for (auto& thr : th)
+                    thr.join();
+            }else
+            {
+                for(size_t j=0;j<network[i]->size();j++)
+                {
+                    register double deltasWeight = 0;
+                    for(size_t k=1;k<this->network[i+1]->size();k++)
+                    {
+                        deltasWeight+=deltas[i+1][k]*this->network[i+1]->operator[](k)->getWeight(j);
+                    }
+                    deltas[i][j]=deltasWeight*this->network[i]->operator[](j)->derivatedOutput();
+                }
-            deltas[i][j]=deltasWeight*network[i]->operator[](j)->derivatedOutput();
             }
         }
     }
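
The commit message says MT in BP is not working, and the hunk shows one likely reason: inside the worker lambda the write-back to deltas[i][j] is commented out, so each thread computes deltasWeight and discards it. A sketch of the lambda with the store restored (an assumption about intent, not the committed code; capturing i by value also avoids depending on the enclosing loop variable while workers run):

// Drop-in sketch for the worker above. Each thread writes a disjoint
// range of deltas[i][j], so the restored store introduces no data race.
th.push_back(std::thread([i,this,&deltas](size_t from, size_t to)->void{
    for(size_t j=from;j<to;j++)
    {
        double deltasWeight = 0;
        for(size_t k=1;k<this->network[i+1]->size();k++)
        {
            deltasWeight+=deltas[i+1][k]*this->network[i+1]->operator[](k)->getWeight(j);
        }
        // the missing write-back: without it the threaded path has no effect
        deltas[i][j]=deltasWeight*this->network[i]->operator[](j)->derivatedOutput();
    }
},s,t==THREADS?network[i]->size():s+step));

Note also that the new serial fallback iterates from j=0 while the old code started at j=1; whether index 0 (typically the bias slot) should be included is worth double-checking against the rest of the codebase.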

View File

@@ -36,9 +36,11 @@ namespace Learning
         void setLearningCoeficient (double);
         void allowEntropy() {entropy=1;}
         void setEntropySize(int milipercents) { entropySize=milipercents; }
+        inline void allowThreading() {allowThreads=1; }
     protected:
         double learningCoeficient=0.4;
         bool entropy=0;
+        bool allowThreads=0;
         int entropySize=500;
     };
 }
}

View File

@@ -1,8 +1,13 @@
 #include "./Reinforcement"
 
-Shin::NeuronNetwork::Learning::Reinforcement::Reinforcement(Shin::NeuronNetwork::FeedForwardNetworkQuick& n): Unsupervised(n), p(n)
+Shin::NeuronNetwork::Learning::Reinforcement::Reinforcement(Shin::NeuronNetwork::FeedForwardNetworkQuick& n): Unsupervised(n), p(new BackPropagation(n))
 {
-    p.setLearningCoeficient(9);
+    p->setLearningCoeficient(9);
 }
+
+Shin::NeuronNetwork::Learning::Reinforcement::~Reinforcement()
+{
+    delete p;
+}
 
 void Shin::NeuronNetwork::Learning::Reinforcement::setQualityFunction(std::function< double(const Problem&,const Solution&) > f)
@@ -30,7 +35,7 @@ double Shin::NeuronNetwork::Learning::Reinforcement::learn(const Shin::NeuronNet
     }
     for(register int i=abs((int)quality);i>=0;i--)
     {
-        p.propagate(q);
+        p->propagate(q);
     }
     return quality;
 }
@@ -44,3 +49,14 @@ double Shin::NeuronNetwork::Learning::Reinforcement::learnSet(const std::vector<
     }
     return err/problems.size();
 }
+
+void Shin::NeuronNetwork::Learning::Reinforcement::setCoef(double q)
+{
+    p->setLearningCoeficient(q);
+}
+
+void Shin::NeuronNetwork::Learning::Reinforcement::setPropagator(Shin::NeuronNetwork::Learning::BackPropagation* prop)
+{
+    delete p;
+    p=prop;
+}
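
setPropagator() takes ownership: it frees the current propagator and stores the caller's pointer, so callers must pass a heap allocation and must not delete it themselves. This is exactly how the updated reinforcement test later in this commit swaps in optical back-propagation:

Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,6,1});
Shin::NeuronNetwork::Learning::Reinforcement b(q);
b.setPropagator(new Shin::NeuronNetwork::Learning::OpticalBackPropagation(q));  // b now owns it
b.getPropagator().setLearningCoeficient(3);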

View File

@@ -41,16 +41,20 @@ namespace Learning
     {
     public:
         Reinforcement(FeedForwardNetworkQuick &n);
+        ~Reinforcement();
+        Reinforcement(const Reinforcement&) =delete;
+        Reinforcement& operator=(const Reinforcement&) =delete;
         void setQualityFunction(std::function<double(const Problem&,const Solution&)>);
         double learn(const Shin::NeuronNetwork::Problem &p);
         double learnSet(const std::vector<Shin::NeuronNetwork::Problem*> &);
-        void setCoef(double q) {p.setLearningCoeficient(q);}
-        inline BackPropagation& getPropagator() {return p;}
+        void setCoef(double q);
+        inline BackPropagation& getPropagator() {return *p;};
+        void setPropagator(BackPropagation *p);
     protected:
         double learningCoeficient=3;
         std::function<double(const Problem&,const Solution&)> qualityFunction=nullptr;
-        BackPropagation p;
+        BackPropagation *p;
     };
 }
 }
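
With p now an owning raw pointer, the added destructor and deleted copy operations are what prevent double frees. A std::unique_ptr would encode the same ownership implicitly; a sketch of that alternative (not what this commit does):

#include <memory>

// Inside Reinforcement: ownership is expressed in the type itself, so the
// hand-written destructor and deleted copies become unnecessary.
std::unique_ptr<BackPropagation> p;
void setPropagator(std::unique_ptr<BackPropagation> prop) { p = std::move(prop); }
BackPropagation& getPropagator() { return *p; }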

View File

@@ -3,10 +3,11 @@ include ../Makefile.const
 LIB_DIR = ../lib
 GEN_TESTS=g-01 g-02
 NN_TESTS= \
+	nn-01 nn-02 nn-03 nn-bp-sppeed \
 	nn-bp-xor \
 	nn-obp-xor \
 	nn-rl-xor nn-rl-and \
-	nn-reinforcement nn-01 nn-02 nn-03 nn-04
+	nn-reinforcement nn-04
 ALL_TESTS=$(NN_TESTS) $(GEN_TESTS)
 LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuronNetwork.a

View File

@@ -18,7 +18,7 @@ class X: public Shin::NeuronNetwork::Problem
     std::vector<bool> q;
 };
 
-int main()
+int main(int argc)
 {
     srand(time(NULL));
     std::vector<Shin::NeuronNetwork::Solution> s;
@@ -31,22 +31,29 @@ int main()
     s.push_back(Shin::NeuronNetwork::Solution(std::vector<double>({0})));
     p.push_back(X(std::vector<bool>({1})));
-    Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,5000,5000,1});
+    Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,5000,5000,5000,500,500,500,500});
     Shin::NeuronNetwork::Learning::BackPropagation b(q);
-    int i=0;
-    std::cerr << i%4 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
+    if(argc > 1)
+    {
+        std::cerr << "THREADING\n";
+        q.setThreads(4);
+    }
+    for(int i=0;i<5;i++)
+    {
+        //b.teach(p[i%2],s[i%2]);
+        std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
+    }
     for(int i=0;i<5;i++)
     {
         //b.teach(p[i%2],s[i%2]);
-//        std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
+        std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
     }
     for(int i=0;i<2;i++)
     {
     //    b.teach(p[i%2],s[i%2]);
-        std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[0] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
-        s[i%4][0]<<"\n";
+//        std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[0] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
+//        s[i%4][0]<<"\n";
     }
 /*
     for(int i=0;i<40;i++)
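
One portability nit in this test: `int main(int argc)` is not a conforming signature. Since only argc is inspected, the standard form costs nothing (a suggested fix, not the committed code):

// Conforming signature; argv is simply unused by the test.
int main(int argc, char** argv)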

tests/nn-bp-sppeed.cpp (new file, 47 lines)

View File

@@ -0,0 +1,47 @@
+#include "../src/NeuronNetwork/FeedForward"
+#include "../src/NeuronNetwork/FeedForwardQuick"
+#include "../src/NeuronNetwork/Learning/BackPropagation"
+#include <iostream>
+#include <vector>
+
+class X: public Shin::NeuronNetwork::Problem
+{
+    public:
+        X(const X& a) :q(a.q) {}
+        X(const std::vector<bool> &a):q(a) {}
+        std::vector<bool> representation() const
+        {
+            return q;
+        }
+    protected:
+        std::vector<bool> q;
+};
+
+int main(int argc, char*argv)
+{
+    srand(time(NULL));
+    std::vector<Shin::NeuronNetwork::Solution> s;
+    std::vector<X> p;
+    //
+    s.push_back(Shin::NeuronNetwork::Solution(std::vector<double>({1})));
+    p.push_back(X(std::vector<bool>({0})));
+    s.push_back(Shin::NeuronNetwork::Solution(std::vector<double>({0})));
+    p.push_back(X(std::vector<bool>({1})));
+    Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,5000,5000,5000,1});
+    Shin::NeuronNetwork::Learning::BackPropagation b(q);
+    if(argc >1)
+    {
+        std::cerr << "Allowing threadnig\n";
+        b.allowThreading();
+    }
+    for(int i=0;i<2;i++)
+    {
+        b.teach(p[i%2],s[i%2]);
+        std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
+    }
+}
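
The new benchmark has the same signature problem in a stronger form: `int main(int argc, char*argv)` declares argv as a single char*, which is not a valid main() signature and many compilers will reject or warn on it; the log string "Allowing threadnig" also carries a typo. A suggested header (an assumption about intent, not the committed code):

// Standard form; argv itself is unused, only argc gates the threaded run.
int main(int argc, char** argv)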

View File

@@ -1,5 +1,6 @@
 #include "../src/NeuronNetwork/FeedForwardQuick"
 #include "../src/NeuronNetwork/Learning/Reinforcement"
+#include "../src/NeuronNetwork/Learning/OpticalBackPropagation"
 #include <iostream>
 #include <vector>
@@ -20,10 +21,19 @@ class X: public Shin::NeuronNetwork::Problem
 
 int main()
 {
-    for (int test=0;test<2;test++)
+    for (int test=0;test<3;test++)
     {
         Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,6,1});
         Shin::NeuronNetwork::Learning::Reinforcement b(q);
+        double targetQuality =1.2;
+        if(test==2)
+        {
+            targetQuality =1.62;
+            std::cerr << "Testing with OBP ...\n";
+            b.setPropagator(new Shin::NeuronNetwork::Learning::OpticalBackPropagation(q));
+            b.getPropagator().setLearningCoeficient(3);
+        }
         b.setQualityFunction(
             [](const Shin::NeuronNetwork::Problem &pr,const Shin::NeuronNetwork::Solution &s)->double
             {
@@ -42,10 +52,10 @@ int main()
                 if(expect==0)
                 {
-                    expect=0.35-s[0];
+                    expect=0.33-s[0];
                 }else
                 {
-                    expect=s[0]-0.65;
+                    expect=s[0]-0.67;
                 }
 //                std::cerr << " returnning " << expect*5.0 << "\n";
@@ -62,7 +72,7 @@ int main()
         p.push_back( new X(std::vector<bool>({0,1})));
         p.push_back(new X(std::vector<bool>({1,1})));
-        if(test)
+        if(test==1)
         {
             std::cerr << "Testing with entropy ...\n";
             b.getPropagator().allowEntropy();
@@ -70,7 +80,6 @@ int main()
         {
             std::cerr << "Testing without entropy ...\n";
         }
-        double targetQuality =1.5;
 
         for(int i=0;i < 500000000;i++)
@@ -78,7 +87,7 @@ int main()
             if(i%100000==0)
                 srand(time(NULL));
-            if(i%20000==0 || err > targetQuality)
+            if(i%40000==0 || err > targetQuality)
             {
                 std::cerr << i << " ("<< err <<").\n";
                 for(int j=0;j<4;j++)