Add a new test and multithreading in FeedForwardQuick (FFQ); multithreading in BackPropagation (BP) is not working yet.
This commit is contained in:
@@ -1,4 +1,5 @@
|
||||
#include "FeedForwardQuick"
|
||||
#include <thread>
|
||||
|
||||
using namespace Shin::NeuronNetwork;
|
||||
|
||||
@@ -72,18 +73,55 @@ Solution FeedForwardNetworkQuick::solve(const Problem& p)
|
||||
for(register size_t i=0;i<layers;i++)
|
||||
{
|
||||
double* newSolution= sums[i+1];//new bool[layerSizes[i]];
|
||||
for( size_t j=1;j<layerSizes[i];j++)
|
||||
if(threads > 1 && layerSizes[i] > 600)
|
||||
{
|
||||
newSolution[j]=sol[0]*weights[i][j][0];
|
||||
register size_t k;
|
||||
for(k=1;k<prevSize;k++)
|
||||
std::vector<std::thread> th;
|
||||
size_t s=1;
|
||||
//TODO THIS IS NOT WORKING!!!
|
||||
size_t step =layerSizes[i]/threads;
|
||||
for(size_t t=1;t<=threads;t++)
|
||||
{
|
||||
if(i==0)
|
||||
//TODO do i need it to check?
|
||||
if(s>=layerSizes[i])
|
||||
break;
|
||||
th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{
|
||||
|
||||
for( size_t j=from;j<to;j++)
|
||||
{
|
||||
newSolution[j]=sol[0]*weights[i][j][0];
|
||||
register size_t k;
|
||||
for(k=1;k<prevSize;k++)
|
||||
{
|
||||
if(i==0)
|
||||
{
|
||||
newSolution[j]+=sol[k]*weights[i][j][k];
|
||||
}else
|
||||
{
|
||||
newSolution[j]+=(1.0/(1.0+exp(-lambda*sol[k])))*weights[i][j][k];
|
||||
}
|
||||
}
|
||||
}
|
||||
},s,t==threads?layerSizes[i]:s+step));//{}
|
||||
s+=step;
|
||||
}
|
||||
|
||||
for (auto& thr : th)
|
||||
thr.join();
|
||||
}else
|
||||
{
|
||||
for( size_t j=1;j<layerSizes[i];j++)
|
||||
{
|
||||
newSolution[j]=sol[0]*weights[i][j][0];
|
||||
register size_t k;
|
||||
for(k=1;k<prevSize;k++)
|
||||
{
|
||||
newSolution[j]+=sol[k]*weights[i][j][k];
|
||||
}else
|
||||
{
|
||||
newSolution[j]+=(1.0/(1.0+exp(-lambda*sol[k])))*weights[i][j][k];
|
||||
if(i==0)
|
||||
{
|
||||
newSolution[j]+=sol[k]*weights[i][j][k];
|
||||
}else
|
||||
{
|
||||
newSolution[j]+=(1.0/(1.0+exp(-lambda*sol[k])))*weights[i][j][k];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,15 +30,9 @@ namespace NeuronNetwork
|
||||
// Writes the new potential value through the `potential` pointer
// (the neuron does not own the storage it points into).
void setPotential(double p) { *potential=p;}
|
||||
// Returns the i-th input weight. No bounds checking is performed.
double getWeight(unsigned int i ) { return weights[i];}
|
||||
// Overwrites the i-th input weight. No bounds checking is performed.
void setWeight(unsigned int i,double p) { weights[i]=p; }
|
||||
inline double output()
|
||||
{
|
||||
return 1.0/(1.0+(exp(-lambda*input())));
|
||||
return input();
|
||||
// register double tmp=;
|
||||
// return NAN==tmp?0:tmp;
|
||||
/* > *potential? 1 :0;*/ }
|
||||
// Raw weighted-sum input of the neuron, read through the `sum` pointer.
inline double input() { return *sum; }
|
||||
// Derivative of the sigmoid output: lambda * f * (1 - f).
// NOTE(review): output() is evaluated twice here; a cached local would
// halve the exp() calls — left unchanged in this comment-only pass.
inline double derivatedOutput() { return lambda*output()*(1.0-output()); };
|
||||
// Const overload: logistic activation 1/(1+e^(-lambda*input())).
inline double output() const { return 1.0/(1.0+(exp(-lambda*input()))); }
|
||||
// Const overload: raw summed input read through the `sum` pointer.
inline double input() const { return *sum; }
|
||||
// Const overload of the sigmoid derivative: lambda * f * (1 - f).
inline double derivatedOutput() const { return lambda*output()*(1.0-output()); }
|
||||
protected:
|
||||
double *potential;
|
||||
double *weights;
|
||||
@@ -110,7 +104,7 @@ namespace NeuronNetwork
|
||||
virtual Solution solve(const Problem& p) override;
|
||||
// Number of layers in the network.
// NOTE(review): `layers` is declared size_t but returned as unsigned —
// potential narrowing on very large networks; confirm intended.
unsigned size() { return layers;}
|
||||
FFLayer* operator[](int l);
|
||||
|
||||
// Sets the worker-thread count used by solve(); threading only kicks in
// when threads > 1 and a layer has more than 600 neurons.
void setThreads(unsigned t) {threads=t;}
|
||||
protected:
|
||||
private:
|
||||
FFLayer **ffLayers;
|
||||
@@ -122,6 +116,7 @@ namespace NeuronNetwork
|
||||
size_t *layerSizes;
|
||||
size_t layers;
|
||||
double lambda;
|
||||
unsigned threads=1;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#include "./BackPropagation"
|
||||
#include <thread>
|
||||
|
||||
Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForwardNetworkQuick &n): Supervised(n)
|
||||
{
|
||||
@@ -22,14 +23,43 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro
|
||||
}
|
||||
}else
|
||||
{
|
||||
for(size_t j=1;j<network[i]->size();j++)
|
||||
if(allowThreads)
|
||||
{
|
||||
register double deltasWeight = 0;
|
||||
for(size_t k=1;k<network[i+1]->size();k++)
|
||||
std::vector<std::thread> th;
|
||||
int s=0;
|
||||
//TODO THIS IS NOT WORKING!!!
|
||||
#define THREADS 4
|
||||
int step =network[i]->size()/THREADS;
|
||||
for(int t=1;t<=THREADS;t++)
|
||||
{
|
||||
deltasWeight+=deltas[i+1][k]*network[i+1]->operator[](k)->getWeight(j);
|
||||
if(s>=network[i]->size())
|
||||
break;
|
||||
th.push_back(std::thread([&i,this,&deltas](size_t from, size_t to)->void{
|
||||
for(size_t j=from;j<to;j++)
|
||||
{
|
||||
register double deltasWeight = 0;
|
||||
for(size_t k=1;k<this->network[i+1]->size();k++)
|
||||
{
|
||||
deltasWeight+=deltas[i+1][k]*this->network[i+1]->operator[](k)->getWeight(j);
|
||||
}
|
||||
//deltas[i][j]*=this->network[i]->operator[](j)->derivatedOutput(); // WHY THE HELL IS SEQ here??
|
||||
}
|
||||
},s,t==THREADS?network[i]->size():s+step));//{}
|
||||
s+=step;
|
||||
}
|
||||
for (auto& thr : th)
|
||||
thr.join();
|
||||
}else
|
||||
{
|
||||
for(size_t j=0;j<network[i]->size();j++)
|
||||
{
|
||||
register double deltasWeight = 0;
|
||||
for(size_t k=1;k<this->network[i+1]->size();k++)
|
||||
{
|
||||
deltasWeight+=deltas[i+1][k]*this->network[i+1]->operator[](k)->getWeight(j);
|
||||
}
|
||||
deltas[i][j]=deltasWeight*this->network[i]->operator[](j)->derivatedOutput();
|
||||
}
|
||||
deltas[i][j]=deltasWeight*network[i]->operator[](j)->derivatedOutput();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,9 +36,11 @@ namespace Learning
|
||||
void setLearningCoeficient (double);
|
||||
// Enables the entropy flag for subsequent learning passes.
void allowEntropy() {entropy=1;}
|
||||
// Sets the entropy magnitude; the parameter name indicates the unit is
// milli-percent (default 500).
void setEntropySize(int milipercents) { entropySize=milipercents; }
|
||||
// Enables the threaded propagate() path.
// NOTE(review): that path is marked "THIS IS NOT WORKING" in the .cpp —
// enabling it is currently unsafe; confirm before use.
inline void allowThreading() {allowThreads=1; }
|
||||
protected:
|
||||
double learningCoeficient=0.4;
|
||||
bool entropy=0;
|
||||
bool allowThreads=0;
|
||||
int entropySize=500;
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
#include "./Reinforcement"
|
||||
|
||||
/// Constructs a Reinforcement learner over network `n`, allocating a
/// BackPropagation propagator that this object owns (released in the
/// destructor, replaceable via setPropagator()).
/// Note: the diff residue here interleaved the old reference-member form
/// (`p.`) with the new owning-pointer form; the pointer version is kept.
Shin::NeuronNetwork::Learning::Reinforcement::Reinforcement(Shin::NeuronNetwork::FeedForwardNetworkQuick& n): Unsupervised(n), p(new BackPropagation(n))
{
	// NOTE(review): 9 is a magic default learning coefficient —
	// presumably an intentionally aggressive setting for reinforcement
	// runs; confirm against the intended training regime.
	p->setLearningCoeficient(9);
}
|
||||
|
||||
/// Destructor: releases the owned BackPropagation propagator that was
/// allocated in the constructor (or installed via setPropagator()).
Shin::NeuronNetwork::Learning::Reinforcement::~Reinforcement()
{
	delete p;
}
|
||||
|
||||
void Shin::NeuronNetwork::Learning::Reinforcement::setQualityFunction(std::function< double(const Problem&,const Solution&) > f)
|
||||
@@ -30,7 +35,7 @@ double Shin::NeuronNetwork::Learning::Reinforcement::learn(const Shin::NeuronNet
|
||||
}
|
||||
for(register int i=abs((int)quality);i>=0;i--)
|
||||
{
|
||||
p.propagate(q);
|
||||
p->propagate(q);
|
||||
}
|
||||
return quality;
|
||||
}
|
||||
@@ -44,3 +49,14 @@ double Shin::NeuronNetwork::Learning::Reinforcement::learnSet(const std::vector<
|
||||
}
|
||||
return err/problems.size();
|
||||
}
|
||||
|
||||
void Shin::NeuronNetwork::Learning::Reinforcement::setCoef(double q)
|
||||
{
|
||||
p->setLearningCoeficient(q);
|
||||
}
|
||||
|
||||
/// Replaces the owned propagator with `prop`, taking ownership of it;
/// the caller must not delete `prop` afterwards.
/// Fix: guard against self-replacement — the original unconditionally did
/// `delete p; p = prop;`, so passing the currently-installed propagator
/// (e.g. obtained from getPropagator()) left `p` dangling.
void Shin::NeuronNetwork::Learning::Reinforcement::setPropagator(Shin::NeuronNetwork::Learning::BackPropagation* prop)
{
	if (p == prop)
		return;          // same object: nothing to do, and deleting would dangle
	delete p;            // release the previously owned propagator
	p = prop;
}
|
||||
|
||||
@@ -41,16 +41,20 @@ namespace Learning
|
||||
{
|
||||
public:
|
||||
Reinforcement(FeedForwardNetworkQuick &n);
|
||||
~Reinforcement();
|
||||
Reinforcement(const Reinforcement&) =delete;
|
||||
Reinforcement& operator=(const Reinforcement&) =delete;
|
||||
|
||||
void setQualityFunction(std::function<double(const Problem&,const Solution&)>);
|
||||
double learn(const Shin::NeuronNetwork::Problem &p);
|
||||
double learnSet(const std::vector<Shin::NeuronNetwork::Problem*> &);
|
||||
void setCoef(double q) {p.setLearningCoeficient(q);}
|
||||
inline BackPropagation& getPropagator() {return p;}
|
||||
void setCoef(double q);
|
||||
// Returns a reference to the owned BackPropagation propagator; the
// reference is invalidated if setPropagator() installs a new one.
inline BackPropagation& getPropagator() {return *p;};
|
||||
void setPropagator(BackPropagation *p);
|
||||
protected:
|
||||
double learningCoeficient=3;
|
||||
std::function<double(const Problem&,const Solution&)> qualityFunction=nullptr;
|
||||
BackPropagation p;
|
||||
BackPropagation *p;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user