#include "BackPropagation.h"

#include <cstdlib>
#include <thread>
#include <vector>
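
// Backpropagation trainer: propagate() walks the layers from the output
// backwards, filling the per-neuron `deltas` buffers and applying the
// weight updates; teach() runs one sample through the network, measures the
// error, and then propagates the (optionally noise-perturbed) target vector.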

NeuralNetwork::Learning::BackPropagation::~BackPropagation()
{
    if (deltas != nullptr)
    {
        for (size_t i = 0; i < network.size(); i++)
            delete[] deltas[i];
        delete[] deltas;
    }
}
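
// propagate() implements the classic delta rule:
//   output layer:  delta_j = correction(expected_j, output_j) * f'(net_j)
//   hidden layer:  delta_j = (sum_k delta_k * w_kj) * f'(net_j), k over the next layer
//   weights:       w_jk += learningCoeficient * delta_j * input_k
// Neuron 0 of every layer is the bias unit, which is why most loops start at 1.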

void NeuralNetwork::Learning::BackPropagation::propagate(const std::vector<float>& expectation)
{
    // Lazily allocate one delta buffer per layer on the first call.
    if (deltas == nullptr)
    {
        deltas = new float*[network.size()];
        for (size_t i = 0; i < network.size(); i++)
        {
            deltas[i] = new float[network[i].size()];
            deltas[i][0] = 0.0f; // slot 0 belongs to the bias unit
        }
    }

    // Output layer: expectation[j - 1] maps to neuron j because of the bias offset.
    size_t last = network.size() - 1;
    for (size_t j = 1; j < network[last].size(); j++)
    {
        deltas[last][j] = correction(expectation[j - 1], network[last][j].output())
                          * network[last][j].derivatedOutput();
    }

    // Hidden layers, back to front: each neuron's delta accumulates the
    // deltas of the following layer through the connecting weights, then is
    // scaled by the derivative of its own activation.
    for (int i = (int)network.size() - 2; i > 0; i--)
    {
        if (allowThreads)
        {
            // Split the layer into THREADS contiguous chunks; the last chunk
            // also takes the division remainder. Each thread writes only its
            // own slice of deltas[i], so joining is the only synchronisation.
            constexpr int THREADS = 4;
            std::vector<std::thread> th;
            size_t s = 0;
            size_t step = network[i].size() / THREADS;
            for (int t = 1; t <= THREADS; t++)
            {
                if (s >= network[i].size())
                    break;
                th.push_back(std::thread([i, this](size_t from, size_t to) {
                    for (size_t j = from; j < to; j++)
                    {
                        float deltasWeight = 0;
                        for (size_t k = 1; k < this->network[i + 1].size(); k++)
                        {
                            deltasWeight += deltas[i + 1][k] * this->network[i + 1][k].getWeight(j);
                        }
                        // Store the finished delta, exactly as the sequential
                        // branch below does.
                        deltas[i][j] = deltasWeight * this->network[i][j].derivatedOutput();
                    }
                }, s, t == THREADS ? network[i].size() : s + step));
                s += step;
            }
            for (auto& thr : th)
                thr.join();
        }
        else
        {
            for (size_t j = 0; j < network[i].size(); j++)
            {
                float deltasWeight = 0;
                for (size_t k = 1; k < this->network[i + 1].size(); k++)
                {
                    deltasWeight += deltas[i + 1][k] * this->network[i + 1][k].getWeight(j);
                }
                deltas[i][j] = deltasWeight * this->network[i][j].derivatedOutput();
            }
        }
    }

    // Finally, apply the weight updates layer by layer.
    for (size_t i = 1; i < network.size(); i++)
    {
        size_t max = network[i - 1].size();

        for (size_t j = 1; j < network[i].size(); j++)
        {
            // Weight 0 is the bias weight; its input is implicitly 1.
            network[i][j].setWeight(0, network[i][j].getWeight(0) + deltas[i][j] * learningCoeficient);
            for (size_t k = 1; k < max; k++)
            {
                network[i][j].setWeight(k, network[i][j].getWeight(k)
                    + learningCoeficient * deltas[i][j] * network[i - 1][k].output());
            }
        }
    }
}

float NeuralNetwork::Learning::BackPropagation::teach(const std::vector<float>& p, const std::vector<float>& solution)
{
    std::vector<float> a = network.solve(p);
    float error = calculateError(solution, a);

    if (noise)
    {
        // Scale each target by a random factor drawn uniformly from
        // [1 - noiseSize / 100000, 1 + noiseSize / 100000] before propagating.
        std::vector<float> s;
        s.reserve(solution.size());
        for (size_t i = 0; i < solution.size(); i++)
        {
            s.push_back(solution[i] * (float)(((100000 - noiseSize) + (rand() % (noiseSize * 2 + 1))) / 100000.0));
        }
        propagate(s);
    }
    else
    {
        propagate(solution);
    }

    return error;
}
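
// A minimal usage sketch (an assumption, not part of the original file): how
// teach() is typically driven from a training loop. The network type and both
// constructor signatures are assumed here purely for illustration.
//
//     NeuralNetwork::FeedForward net({2, 3, 1});             // assumed ctor
//     NeuralNetwork::Learning::BackPropagation trainer(net); // assumed ctor
//     std::vector<std::vector<float>> in  = {{0,0},{0,1},{1,0},{1,1}};
//     std::vector<std::vector<float>> out = {{0},{1},{1},{0}};
//     for (int epoch = 0; epoch < 10000; epoch++)
//     {
//         float error = 0;
//         for (size_t i = 0; i < in.size(); i++)
//             error += trainer.teach(in[i], out[i]); // per-sample error
//         if (error < 0.01f)                         // stop once converged
//             break;
//     }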