Added OpticalBackPropagation, a modified BackPropagation; assorted fixes and refactoring
@@ -1,5 +1,5 @@
 CXX=g++ -m64
-CXXFLAGS+= -Wall -Wextra -pedantic -Weffc++ -Wshadow -Wstrict-aliasing -ansi
+CXXFLAGS+= -Wall -Wextra -pedantic -Weffc++ -Wshadow -Wstrict-aliasing -ansi -Woverloaded-virtual -Wdelete-non-virtual-dtor
 #CXXFLAGS+=-Werror
 CXXFLAGS+= -g
 CXXFLAGS+= -O3
@@ -35,7 +35,8 @@ FeedForwardNetworkQuick::~FeedForwardNetworkQuick()
 	{
 		for (size_t j=0;j<layerSizes[i];j++)
 		{
-			delete[] weights[i][j];
+			if(j!=0)
+				delete[] weights[i][j];
 		}
 		delete[] weights[i];
 		delete[] potentials[i];
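One plausible reading of the new guard (an assumption; the matching allocation is outside this hunk): slot 0 of each layer is the bias slot, whose weight row is never allocated separately, so the unconditional delete[] freed an invalid pointer. The deallocation would then mirror an allocation of roughly this shape:

	// hypothetical allocation symmetric to the guarded delete[] above;
	// slot 0 (the bias unit) gets no weight row of its own
	for (size_t j = 0; j < layerSizes[i]; j++)
		weights[i][j] = (j != 0) ? new double[prevSize] : nullptr;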
@@ -71,20 +72,20 @@ Solution FeedForwardNetworkQuick::solve(const Problem& p)
 	for(register size_t i=0;i<layers;i++)
 	{
 		double* newSolution= sums[i+1];//new bool[layerSizes[i]];
-		for(register size_t j=1;j<layerSizes[i];j++)
+		for( size_t j=1;j<layerSizes[i];j++)
 		{
-			register double q=sol[0]*weights[i][j][0];
-			for(register size_t k=1;k<prevSize;k++)
+			newSolution[j]=sol[0]*weights[i][j][0];
+			register size_t k;
+			for(k=1;k<prevSize;k++)
 			{
 				if(i==0)
 				{
-					q+=sol[k]*weights[i][j][k];
+					newSolution[j]+=sol[k]*weights[i][j][k];
 				}else
 				{
-					q+=(1.0/(1.0+exp(-lambda*sol[k])))*weights[i][j][k];
+					newSolution[j]+=(1.0/(1.0+exp(-lambda*sol[k])))*weights[i][j][k];
 				}
 			}
-			newSolution[j]=q;
 		}
 		prevSize=layerSizes[i];
 		sol=newSolution;
 
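The rewrite drops the local accumulator q and writes into newSolution[j] directly. For clarity, the forward pass this hunk touches can be read as the following self-contained sketch (names and shapes assumed from the hunk; not part of the commit). Layer 0 consumes the raw inputs; later layers consume sigmoid activations of the previous sums:

	#include <cmath>
	#include <cstddef>

	// logistic activation with steepness lambda, as in the hunk above
	static double sigmoid(double x, double lambda)
	{
		return 1.0 / (1.0 + std::exp(-lambda * x));
	}

	// one layer of the forward pass: out[j] = sum_k act(sol[k]) * w[j][k],
	// with slot 0 acting as the bias term
	static void forwardLayer(const double* sol, double* out, double* const* w,
	                         std::size_t layerSize, std::size_t prevSize,
	                         bool firstLayer, double lambda)
	{
		for (std::size_t j = 1; j < layerSize; j++)
		{
			out[j] = sol[0] * w[j][0];
			for (std::size_t k = 1; k < prevSize; k++)
				out[j] += (firstLayer ? sol[k] : sigmoid(sol[k], lambda)) * w[j][k];
		}
	}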
@@ -99,7 +99,7 @@ namespace NeuronNetwork
 				weights[i][j]= new double[prev_size];
 				for(int k=0;k<prev_size;k++)
 				{
-					weights[i][j][k]=0.5-((double)(rand()%1000))/1000.0;
+					weights[i][j][k]=1.0-((double)(rand()%2001))/1000.0;
 				}
 			}
 			i++;
 
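The initialization range widens here from (-0.5, 0.5] to [-1.0, 1.0]: rand()%2001 is an integer in [0, 2000], so 1.0 - x/1000.0 spans [-1.0, 1.0] in steps of 0.001. A sketch of the same range with <random> (an alternative, not what the commit uses):

	#include <random>

	std::mt19937 gen{std::random_device{}()};
	std::uniform_real_distribution<double> dist(-1.0, 1.0);
	double w = dist(gen);   // one initial weight in [-1.0, 1.0)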
@@ -5,16 +5,6 @@ Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForwardNetwo
 
 }
 
-double Shin::NeuronNetwork::Learning::BackPropagation::calculateError(const Shin::NeuronNetwork::Solution& expectation, const Shin::NeuronNetwork::Solution& solution)
-{
-	register double a=0;
-	for (size_t i=0;i<expectation.size();i++)
-	{
-		a+=pow(expectation[i]-solution[i],2)/2;
-	}
-	return a;
-}
-
 void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::NeuronNetwork::Solution& expectation)
 {
 	double **deltas;
@@ -51,8 +41,10 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro
 			max=network[i]->size();
 		else
 			max=network[i-1]->size();
 
-		for(size_t j=1;j<network[i]->size();j++)
+		size_t j=1;
+		int size=network[i]->size();
+		for(j=1;j<size;j++)
 		{
 			network[i]->operator[](j)->setWeight(0,network[i]->operator[](j)->getWeight(0)+deltas[i][j]*learningCoeficient);
 			for(size_t k=1;k<max;k++)
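The update applied inside this loop (and repeated verbatim in OpticalBackPropagation below) is per-weight gradient descent; in the commit's own names:

	w[i][j][k] += learningCoeficient * deltas[i][j] * input_k

where input_k is the raw network input (network.sums[0][k]) for the first layer and the previous layer's output otherwise, and weight 0 is the bias, updated with an implicit input of 1.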
@@ -75,8 +67,19 @@ double Shin::NeuronNetwork::Learning::BackPropagation::teach(const Shin::NeuronN
 {
 	Shin::NeuronNetwork::Solution a=network.solve(p);
 	double error=calculateError(solution,a);
 
-	propagate(solution);
+	std::vector<double> s;
+	if(entropy)
+	{
+		for(size_t i=0;i<solution.size();i++)
+		{
+			s.push_back(solution[i]*((double)(990+(rand()%21))/1000.0));
+		}
+		propagate(s);
+	}else
+	{
+		propagate(solution);
+	}
 
 
 //	std::cerr << "error: " << error << "\n";
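The new entropy path jitters each target before propagating: 990 + rand()%21 is an integer in [990, 1010], so every component of the expected solution is scaled by a factor in [0.990, 1.010], roughly plus or minus 1% multiplicative noise. In isolation (hypothetical names):

	double factor = (990 + rand() % 21) / 1000.0;   // uniform over {0.990, ..., 1.010}
	double jittered = target * factor;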
@@ -24,12 +24,14 @@ namespace Learning
 	{
 		public:
 			BackPropagation(FeedForwardNetworkQuick &n);
-			double calculateError(const Solution &expectation,const Solution &solution);
-			void propagate(const Shin::NeuronNetwork::Solution& expectation);
+			virtual void propagate(const Shin::NeuronNetwork::Solution& expectation);
 			double teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution);
 
 			void setLearningCoeficient (double);
+			void allowEntropy() {entropy=1;}
 		protected:
 			double learningCoeficient=0.4;
+			bool entropy=1;
 	};
 }
 }
 
src/NeuronNetwork/Learning/OpticalBackPropagation (symbolic link, 1 line)
@@ -0,0 +1 @@
+./OpticalBackPropagation.h
src/NeuronNetwork/Learning/OpticalBackPropagation.cpp (new file, 65 lines)
@@ -0,0 +1,65 @@
+#include "./OpticalBackPropagation"
+
+Shin::NeuronNetwork::Learning::OpticalBackPropagation::OpticalBackPropagation(FeedForwardNetworkQuick &n): BackPropagation(n)
+{
+
+}
+
+void Shin::NeuronNetwork::Learning::OpticalBackPropagation::propagate(const Shin::NeuronNetwork::Solution& expectation)
+{
+	double **deltas;
+	deltas=new double*[network.size()];
+	for(int i=(int)network.size()-1;i>=0;i--)
+	{
+		deltas[i]=new double[network[i]->size()];
+		deltas[i][0]=0.0;
+		if(i==(int)network.size()-1)
+		{
+			for(size_t j=1;j<network[i]->size();j++)
+			{
+				register double tmp=(expectation[j-1]-network[i]->operator[](j)->output());
+				deltas[i][j]= (1+exp(tmp*tmp))*network[i]->operator[](j)->derivatedOutput();
+				if(tmp <0)
+				{
+					deltas[i][j]=-deltas[i][j];
+				}
+			}
+		}else
+		{
+			for(size_t j=1;j<network[i]->size();j++)
+			{
+				register double deltasWeight = 0;
+				for(size_t k=1;k<network[i+1]->size();k++)
+				{
+					deltasWeight+=deltas[i+1][k]*network[i+1]->operator[](k)->getWeight(j);
+				}
+				deltas[i][j]=deltasWeight*network[i]->operator[](j)->derivatedOutput();
+			}
+		}
+	}
+
+	for(size_t i=0;i<network.size();i++)
+	{
+		size_t max;
+		if(i==0)
+			max=network[i]->size();
+		else
+			max=network[i-1]->size();
+
+		for(size_t j=1;j<network[i]->size();j++)
+		{
+			network[i]->operator[](j)->setWeight(0,network[i]->operator[](j)->getWeight(0)+deltas[i][j]*learningCoeficient);
+			for(size_t k=1;k<max;k++)
+			{
+				network[i]->operator[](j)->setWeight(k,
+					network[i]->operator[](j)->getWeight(k)+learningCoeficient* deltas[i][j]*
+					(i==0? network.sums[0][k]:(double)network[i-1]->operator[](k)->output()));
+			}
+		}
+	}
+	for(size_t i=0;i<network.size();i++)
+	{
+		delete[] deltas[i];
+	}
+	delete[] deltas;
+}
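Relative to standard backpropagation, which computes the output-layer delta as (e_j - o_j) * f'(net_j) (see the removed Reinforcement::propagate below), the optical variant above implements, in effect:

	delta_j = sign(e_j - o_j) * (1 + exp((e_j - o_j)^2)) * f'(net_j)

following the paper linked in the header: the exponential factor amplifies the correction for large output errors while preserving its direction.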
src/NeuronNetwork/Learning/OpticalBackPropagation.h (new file, 33 lines)
@@ -0,0 +1,33 @@
+#ifndef _OPT_BACK_PROPAGATION_H_
+#define _OPT_BACK_PROPAGATION_H_
+
+#include <math.h>
+#include <cstddef>
+
+#include "../Solution.h"
+#include "../FeedForwardQuick.h"
+#include "BackPropagation"
+
+/*
+ * http://proceedings.informingscience.org/InSITE2005/P106Otai.pdf
+ *
+ *
+ */
+
+namespace Shin
+{
+namespace NeuronNetwork
+{
+namespace Learning
+{
+	class OpticalBackPropagation : public BackPropagation
+	{
+		public:
+			OpticalBackPropagation(FeedForwardNetworkQuick &n);
+			virtual void propagate(const Shin::NeuronNetwork::Solution& expectation) override;
+		protected:
+	};
+}
+}
+}
+#endif
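Because propagate is virtual in BackPropagation and overridden here, the inherited teach()/teachSet() dispatch to the optical rule automatically. A minimal usage sketch assembled from the tests in this commit:

	Shin::NeuronNetwork::FeedForwardNetworkQuick net({2,4,1});
	Shin::NeuronNetwork::Learning::OpticalBackPropagation obp(net);
	obp.setLearningCoeficient(0.1);
	double err = obp.teach(problem, expected);   // problem/expected as in the tests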
@@ -2,10 +2,10 @@
 
 Shin::NeuronNetwork::Learning::Reinforcement::Reinforcement(Shin::NeuronNetwork::FeedForwardNetworkQuick& n): Unsupervised(n), p(n)
 {
-	p.setLearningCoeficient(4.5);
+	p.setLearningCoeficient(9);
 }
 
-void Shin::NeuronNetwork::Learning::Reinforcement::setQualityFunction(std::function< double(const Solution &s) > f)
+void Shin::NeuronNetwork::Learning::Reinforcement::setQualityFunction(std::function< double(const Problem&,const Solution&) > f)
 {
 	qualityFunction=f;
 }
@@ -13,7 +13,7 @@ void Shin::NeuronNetwork::Learning::Reinforcement::setQualityFunction(std::funct
 double Shin::NeuronNetwork::Learning::Reinforcement::learn(const Shin::NeuronNetwork::Problem& problem)
 {
 	Solution s=network.solve(problem);
-	double quality=qualityFunction(s);
+	double quality=qualityFunction(problem,s);
 	std::vector<double> q;
 	for(register size_t j=0;j<s.size();j++)
 	{
@@ -35,57 +35,12 @@ double Shin::NeuronNetwork::Learning::Reinforcement::learn(const Shin::NeuronNet
 	return quality;
 }
 
-void Shin::NeuronNetwork::Learning::Reinforcement::propagate(const Shin::NeuronNetwork::Solution& expectation,bool random)
+double Shin::NeuronNetwork::Learning::Reinforcement::learnSet(const std::vector< Shin::NeuronNetwork::Problem* >& problems)
 {
-	double **deltas;
-	deltas=new double*[network.size()];
-	for(int i=(int)network.size()-1;i>=0;i--)
+	double err=0;
+	for(Shin::NeuronNetwork::Problem *pr:problems)
 	{
-		deltas[i]=new double[network[i]->size()];
-		deltas[i][0]=0.0;
-		if(i==(int)network.size()-1)
-		{
-			for(size_t j=1;j<network[i]->size();j++)
-			{
-				deltas[i][j]= (expectation[j-1]-network[i]->operator[](j)->output())*network[i]->operator[](j)->derivatedOutput();
-//				std::cerr << "X "<< deltas[i][j] <" Z ";
-			}
-		}else
-		{
-			for(size_t j=1;j<network[i]->size();j++)
-			{
-				register double deltasWeight = 0;
-				for(size_t k=1;k<network[i+1]->size();k++)
-				{
-					deltasWeight+=deltas[i+1][k]*network[i+1]->operator[](k)->getWeight(j);
-				}
-				deltas[i][j]=deltasWeight*network[i]->operator[](j)->derivatedOutput();
-			}
-		}
+		err+=learn(*pr);
 	}
-
-	for(size_t i=0;i<network.size();i++)
-	{
-		size_t max;
-		if(i==0)
-			max=network[i]->size();
-		else
-			max=network[i-1]->size();
-
-		for(size_t j=1;j<network[i]->size();j++)
-		{
-			network[i]->operator[](j)->setWeight(0,network[i]->operator[](j)->getWeight(0)+deltas[i][j]*learningCoeficient);
-			for(size_t k=1;k<max;k++)
-			{
-				network[i]->operator[](j)->setWeight(k,
-					network[i]->operator[](j)->getWeight(k)+learningCoeficient* deltas[i][j]*
-					(i==0? network.sums[0][k]:(double)network[i-1]->operator[](k)->output()));
-			}
-		}
-	}
-	for(size_t i=0;i<network.size();i++)
-	{
-		delete[] deltas[i];
-	}
-	delete[] deltas;
+	return err/problems.size();
 }
 
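learnSet, added above, is simply the mean quality over the set:

	err = (1/|problems|) * sum_p learn(*p)

and, unlike the supervised teachSet (a summed error to be minimized), the reinforcement tests below treat this mean as a score to be maximized, stopping once it exceeds a target.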
@@ -27,13 +27,14 @@ namespace Learning
 		public:
 			Reinforcement(FeedForwardNetworkQuick &n);
 
-			void setQualityFunction(std::function<double(const Solution &s)>);
+			void setQualityFunction(std::function<double(const Problem&,const Solution&)>);
 			double learn(const Shin::NeuronNetwork::Problem &p);
-			void propagate(const Shin::NeuronNetwork::Solution& expectation,bool random=0);
+			double learnSet(const std::vector<Shin::NeuronNetwork::Problem*> &);
+			void setCoef(double q) {p.setLearningCoeficient(q);}
+			inline BackPropagation& getPropagator() {return p;}
 		protected:
 			double learningCoeficient=3;
-			std::function<double(const Solution &s)> qualityFunction=nullptr;
+			std::function<double(const Problem&,const Solution&)> qualityFunction=nullptr;
 			BackPropagation p;
 	};
 }
 
@@ -4,6 +4,27 @@ Shin::NeuronNetwork::Learning::Supervised::Supervised(Shin::NeuronNetwork::FeedF
 
 }
 
+
+double Shin::NeuronNetwork::Learning::Supervised::calculateError(const Shin::NeuronNetwork::Solution& expectation, const Shin::NeuronNetwork::Solution& solution)
+{
+	register double a=0;
+	for (size_t i=0;i<expectation.size();i++)
+	{
+		a+=pow(expectation[i]-solution[i],2)/2;
+	}
+	return a;
+}
+
+double Shin::NeuronNetwork::Learning::Supervised::teachSet(std::vector< Shin::NeuronNetwork::Problem* >& p, std::vector< Shin::NeuronNetwork::Solution* >& solution)
+{
+	double error=0;
+	for (register size_t i=0;i<p.size();i++)
+	{
+		error+=teach(*p[i],*solution[i]);
+	}
+	return error;
+}
+
 void Shin::NeuronNetwork::Learning::Supervised::debugOn()
 {
 	debug=1;
 
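calculateError, now implemented here instead of being left pure virtual, is the standard half sum of squared residuals:

	E(e, s) = \frac{1}{2} \sum_i (e_i - s_i)^2

and teachSet accordingly returns this error summed over the whole training set.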
@@ -19,8 +19,9 @@ namespace Learning
 			Supervised() =delete;
 			Supervised(FeedForwardNetworkQuick &n);
 			virtual ~Supervised() {};
-			virtual double calculateError(const Solution &expectation,const Solution &solution)=0;
+			double calculateError(const Solution &expectation,const Solution &solution);
 			virtual double teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution)=0;
+			double teachSet(std::vector<Shin::NeuronNetwork::Problem*> &p,std::vector<Shin::NeuronNetwork::Solution*> &solution);
 			void debugOn();
 			void debugOff();
 		protected:
 
@@ -1,5 +1,6 @@
-OBJFILES= Neuron.o Network.o FeedForward.o FeedForwardQuick.o \
-	Learning/Supervised.o Learning/Unsupervised.o Learning/Reinforcement.o Learning/BackPropagation.o \
+OBJFILES= Neuron.o Network.o FeedForward.o FeedForwardQuick.o\
+	Learning/Supervised.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o\
+	Learning/Unsupervised.o Learning/Reinforcement.o\
 	Solution.o Problem.o
 
 LIBNAME=NeuronNetwork
 
@@ -14,8 +14,8 @@ namespace NeuronNetwork
 			Problem();
 			virtual ~Problem(){};
 			operator std::vector<bool>() const;
-		protected:
 			virtual std::vector<bool> representation() const =0;
+		protected:
 		private:
 	};
 }
 
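With representation() moved to the public section above, callers can query a Problem through its base type, which the new tests rely on (p[i]->representation()). The tests all implement the interface the same way; a minimal subclass sketch taken from their pattern:

	class X : public Shin::NeuronNetwork::Problem
	{
		public:
			X(const std::vector<bool> &a) : q(a) {}
			std::vector<bool> representation() const { return q; }
		protected:
			std::vector<bool> q;
	};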
@@ -2,7 +2,11 @@ include ../Makefile.const
 
 LIB_DIR = ../lib
 GEN_TESTS=g-01 g-02
-NN_TESTS= nn-reinforcement nn-01 nn-02 nn-03 nn-04
+NN_TESTS= \
+	nn-bp-xor \
+	nn-obp-xor \
+	nn-rl-xor nn-rl-and \
+	nn-reinforcement nn-01 nn-02 nn-03 nn-04
 ALL_TESTS=$(NN_TESTS) $(GEN_TESTS)
 
 LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuronNetwork.a
 
@@ -31,25 +31,23 @@ int main()
 	s.push_back(Shin::NeuronNetwork::Solution(std::vector<double>({0})));
 	p.push_back(X(std::vector<bool>({1})));
 
-	Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,1});
+	Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,5000,5000,1});
 	Shin::NeuronNetwork::Learning::BackPropagation b(q);
 
 	int i=0;
 	std::cerr << i%4 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
 
-	for(int i=0;i<2000;i++)
+	for(int i=0;i<5;i++)
 	{
-		b.teach(p[i%2],s[i%2]);
-		std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
+		//b.teach(p[i%2],s[i%2]);
+//		std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
 	}
 	b.debugOn();
 	for(int i=0;i<2;i++)
 	{
-		b.teach(p[i%2],s[i%2]);
+//		b.teach(p[i%2],s[i%2]);
 		std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[0] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
 			s[i%4][0]<<"\n";
 	}
 	b.debugOff();
 /*
 	for(int i=0;i<40;i++)
 	{
 
tests/nn-04.cpp (new file, 74 lines)
@@ -0,0 +1,74 @@
+#include "../src/NeuronNetwork/Network"
+
+#include <iostream>
+class X: public Shin::NeuronNetwork::Problem
+{
+	public: X(bool x,bool y):x(x),y(y) {}
+	protected: std::vector<bool> representation() const { return std::vector<bool>({x,y}); }
+	private:
+		bool x;
+		bool y;
+};
+
+int main()
+{
+	srand(time(NULL));
+	int lm=5;
+	Shin::NeuronNetwork::FeedForwardNetwork net({2,lm,1});
+	bool x=1;
+	int prev_err=0;
+	int err=0;
+	int l;
+	int n;
+	int w;
+	int pot;
+	int wei;
+	int c=0;
+	std::cout << "\ntest 1 & 1 -" << net.solve(X(1,1))[0];
+	std::cout << "\ntest 1 & 0 -" << net.solve(X(1,0))[0];
+	std::cout << "\ntest 0 & 1 - " << net.solve(X(0,1))[0];
+	std::cout << "\ntest 0 & 0- " << net.solve(X(0,0))[0];
+	std::cout << "\n---------------------------------------";
+	do{
+		if(c%10000 ==1)
+		{
+			std::cout << "\nmixed";
+			srand(time(NULL));
+		}
+		err=0;
+		c++;
+		l=rand()%2+1;
+		n=rand()%lm;
+		w=rand()%2;
+		if(l==2)
+			n=0;
+		pot=net[l]->operator[](n)->getPotential();
+		net[l]->operator[](n)->setPotential(pot*(rand()%21+90)/100);
+		wei=net[l]->operator[](n)->getWeight(w);
+		net[l]->operator[](n)->setWeight(w,wei*(rand()%21+90)/100);
+
+		for(int i=0;i<100;i++)
+		{
+			bool x= rand()%2;
+			bool y=rand()%2;
+			Shin::NeuronNetwork::Solution s =net.solve(X(x,y));
+			if(s[0]!= (x xor y))
+				err++;
+		}
+
+		if(err > prev_err)
+		{
+			net[l]->operator[](n)->setPotential(pot);
+			net[l]->operator[](n)->setWeight(w,wei);
+		};
+//		std::cout << "C: " << c << " err: " << err << " prev: "<<prev_err << "\n";
+		prev_err=err;
+		if(err <1)
+			x=0;
+	}while(x);
+	std::cout << "\ntest 1 & 1 -" << net.solve(X(1,1))[0];
+	std::cout << "\ntest 1 & 0 -" << net.solve(X(1,0))[0];
+	std::cout << "\ntest 0 & 1 - " << net.solve(X(0,1))[0];
+	std::cout << "\ntest 0 & 0- " << net.solve(X(0,0))[0];
+	std::cout << "\nTotaly: " << c << "\n";
+}
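nn-04 is a random hill-climb rather than gradient descent: each pass scales one randomly chosen potential and weight by (rand()%21+90)/100, nominally a factor in [0.90, 1.10], keeps the change if the sampled XOR error did not grow, and reverts it otherwise. The scale in isolation (note that with pot and wei held as int the arithmetic truncates; a double keeps it exact):

	double factor = (rand() % 21 + 90) / 100.0;   // uniform over {0.90, 0.91, ..., 1.10}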
tests/nn-bp-xor.cpp (new file, 77 lines)
@@ -0,0 +1,77 @@
+#include "../src/NeuronNetwork/FeedForwardQuick"
+#include "../src/NeuronNetwork/Learning/BackPropagation"
+
+#include <iostream>
+#include <vector>
+
+class X: public Shin::NeuronNetwork::Problem
+{
+	public:
+		X(const X& a) :q(a.q) {}
+		X(const std::vector<bool> &a):q(a) {}
+		std::vector<bool> representation() const
+		{
+			return q;
+		}
+	protected:
+		std::vector<bool> q;
+};
+
+int main()
+{
+
+	for (int test=0;test<2;test++)
+	{
+		Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,4,1});
+		Shin::NeuronNetwork::Learning::BackPropagation b(q);
+
+		srand(time(NULL));
+		std::vector<Shin::NeuronNetwork::Solution*> s;
+		std::vector<Shin::NeuronNetwork::Problem*> p;
+
+		s.push_back(new Shin::NeuronNetwork::Solution(std::vector<double>({0})));
+		p.push_back(new X(std::vector<bool>({0,0})));
+
+		s.push_back( new Shin::NeuronNetwork::Solution(std::vector<double>({1})));
+		p.push_back( new X(std::vector<bool>({1,0})));
+
+		s.push_back(new Shin::NeuronNetwork::Solution(std::vector<double>({0})));
+		p.push_back(new X(std::vector<bool>({1,1})));
+
+		s.push_back( new Shin::NeuronNetwork::Solution(std::vector<double>({1})));
+		p.push_back( new X(std::vector<bool>({0,1})));
+
+		if(test)
+		{
+			std::cerr << "Testing with entropy\n";
+			b.allowEntropy();
+		}else
+		{
+			std::cerr << "Testing without entropy\n";
+		}
+		b.setLearningCoeficient(0.1);//8);
+		for(int j=0;;j++)
+		{
+			double err=b.teachSet(p,s);
+			if(err <0.3)
+			{
+//				b.setLearningCoeficient(5);
+			}
+			if(err <0.1)
+			{
+//				b.setLearningCoeficient(0.2);
+			}
+			if(err <0.001)
+			{
+				std::cerr << j << "(" << err <<"):\n";
+				for(int i=0;i<4;i++)
+				{
+					std::cerr << "\t" << i%4 <<". FOR: [" << p[i%4]->representation()[0] << "," <<p[i%4]->representation()[1] << "] res: " <<
+						q.solve(*p[i%4])[0] << " should be " << s[i%4]->operator[](0)<<"\n";
+				}
+			}
+			if(err <0.001)
+				break;
+		}
+	}
+}
tests/nn-obp-xor.cpp (new file, 78 lines)
@@ -0,0 +1,78 @@
+#include "../src/NeuronNetwork/FeedForwardQuick"
+#include "../src/NeuronNetwork/Learning/OpticalBackPropagation"
+
+#include <iostream>
+#include <vector>
+
+class X: public Shin::NeuronNetwork::Problem
+{
+	public:
+		X(const X& a) :q(a.q) {}
+		X(const std::vector<bool> &a):q(a) {}
+		std::vector<bool> representation() const
+		{
+			return q;
+		}
+	protected:
+		std::vector<bool> q;
+};
+
+int main()
+{
+
+	for (int test=0;test<2;test++)
+	{
+		Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,4,1});
+		Shin::NeuronNetwork::Learning::OpticalBackPropagation b(q);
+
+		srand(time(NULL));
+		std::vector<Shin::NeuronNetwork::Solution*> s;
+		std::vector<Shin::NeuronNetwork::Problem*> p;
+
+		s.push_back(new Shin::NeuronNetwork::Solution(std::vector<double>({0})));
+		p.push_back(new X(std::vector<bool>({0,0})));
+
+		s.push_back( new Shin::NeuronNetwork::Solution(std::vector<double>({1})));
+		p.push_back( new X(std::vector<bool>({1,0})));
+
+		s.push_back(new Shin::NeuronNetwork::Solution(std::vector<double>({0})));
+		p.push_back(new X(std::vector<bool>({1,1})));
+
+		s.push_back( new Shin::NeuronNetwork::Solution(std::vector<double>({1})));
+		p.push_back( new X(std::vector<bool>({0,1})));
+
+		b.debugOn();
+		if(test)
+		{
+			std::cerr << "Testing with entropy\n";
+			b.allowEntropy();
+		}else
+		{
+			std::cerr << "Testing without entropy\n";
+		}
+		b.setLearningCoeficient(0.1);
+		for(int j=0;;j++)
+		{
+			double err=b.teachSet(p,s);
+			if(err <0.3)
+			{
+//				b.setLearningCoeficient(5);
+			}
+			if(err <0.1)
+			{
+//				b.setLearningCoeficient(0.2);
+			}
+			if(err <0.001)
+			{
+				std::cerr << j << "(" << err <<"):\n";
+				for(int i=0;i<4;i++)
+				{
+					std::cerr << "\t" << i%4 <<". FOR: [" << p[i%4]->representation()[0] << "," <<p[i%4]->representation()[1] << "] res: " <<
+						q.solve(*p[i%4])[0] << " should be " << s[i%4]->operator[](0)<<"\n";
+				}
+			}
+			if(err <0.001)
+				break;
+		}
+	}
+}
tests/nn-rl-and.cpp (new file, 85 lines)
@@ -0,0 +1,85 @@
+#include "../src/NeuronNetwork/FeedForwardQuick"
+#include "../src/NeuronNetwork/Learning/Reinforcement.h"
+#include "../src/NeuronNetwork/Solution.h"
+
+#include <iostream>
+#include <vector>
+
+class X: public Shin::NeuronNetwork::Problem
+{
+	public:
+		X(const X& a) :q(a.q) {}
+		X(const std::vector<bool> &a):q(a) {}
+		std::vector<bool> representation() const
+		{
+			return q;
+		}
+	protected:
+		std::vector<bool> q;
+};
+
+int main()
+{
+	srand(time(NULL));
+
+	std::vector<Shin::NeuronNetwork::Problem*> p;
+
+	p.push_back(new X(std::vector<bool>({0,0})));
+
+	p.push_back(new X(std::vector<bool>({1,1})));
+
+	Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,1});
+	Shin::NeuronNetwork::Learning::Reinforcement b(q);
+	int i=0;
+	double targetQuality=1.4;
+	b.setQualityFunction(
+		[](const Shin::NeuronNetwork::Problem &pr,const Shin::NeuronNetwork::Solution &s)->double
+		{
+			if(pr.representation()[0]==0)
+			{
+				// we expect 1
+				int e=(s[0]-0.80)*15.0;//+(abs(s[1])-0.5)*100.0;
+				return e;
+			}else
+			{
+				// we expect 0
+				int e=(0.20-s[0])*15.0;//+(0.4-abs(s[1]))*100.0;
+				return e;
+			}
+			return 1.0;
+		});
+	for(i=0;i < 500000000;i++)
+	{
+		double err=b.learnSet(p);
+
+		if(i%100000==0)
+			srand(time(NULL));
+		if(err > targetQuality)
+		{
+			std::cerr << i << " ("<< err <<").\n";
+			for(int j=0;j<2;j++)
+			{
+				std::cerr << j%4 <<". FOR: [" << p[j%4]->representation()[0] << "," <<p[j%4]->representation()[0] << "] res: " << q.solve(*p[j%4])[0] << "\n";
+			}
+		}
+		if(err >targetQuality)
+			break;
+	}
+
+/*	int i=0;
+	std::cerr << i%4 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
+
+	for(int i=0;i<2000;i++)
+	{
+		b.teach(p[i%2],s[i%2]);
+		std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
+	}
+	b.debugOn();
+	for(int i=0;i<2;i++)
+	{
+		b.teach(p[i%2],s[i%2]);
+		std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[0] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
+			s[i%4][0]<<"\n";
+	}
+	b.debugOff();*/
+}
tests/nn-rl-xor.cpp (new file, 94 lines)
@@ -0,0 +1,94 @@
+#include "../src/NeuronNetwork/FeedForwardQuick"
+#include "../src/NeuronNetwork/Learning/Reinforcement"
+
+#include <iostream>
+#include <vector>
+
+class X: public Shin::NeuronNetwork::Problem
+{
+	public:
+		X(const X& a) :q(a.q) {}
+		X(const std::vector<bool> &a):q(a) {}
+		std::vector<bool> representation() const
+		{
+			return q;
+		}
+	protected:
+		std::vector<bool> q;
+};
+
+int main()
+{
+
+	for (int test=0;test<2;test++)
+	{
+		Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,6,1});
+		Shin::NeuronNetwork::Learning::Reinforcement b(q);
+		b.setQualityFunction(
+			[](const Shin::NeuronNetwork::Problem &pr,const Shin::NeuronNetwork::Solution &s)->double
+			{
+				std::vector <bool> p=pr;
+				double expect=0.0;
+				if(p[0] && p[1])
+					expect=0;
+				else if(p[0] && !p[1])
+					expect=1;
+				else if(!p[0] && !p[1])
+					expect=0;
+				else if(!p[0] && p[1])
+					expect=1;
+
+//				std::cerr << "expected: " << expect << " got " << s[0];
+
+				if(expect==0)
+				{
+					expect=0.35-s[0];
+				}else
+				{
+					expect=s[0]-0.65;
+				}
+
+//				std::cerr << " returning " << expect*5.0 << "\n";
+
+				return expect*5.0;
+			});
+
+		srand(time(NULL));
+
+		std::vector<Shin::NeuronNetwork::Problem*> p;
+
+		p.push_back(new X(std::vector<bool>({0,0})));
+		p.push_back( new X(std::vector<bool>({1,0})));
+		p.push_back( new X(std::vector<bool>({0,1})));
+		p.push_back(new X(std::vector<bool>({1,1})));
+
+		if(test)
+		{
+			std::cerr << "Testing with entropy ...\n";
+			b.getPropagator().allowEntropy();
+		}else
+		{
+			std::cerr << "Testing without entropy ...\n";
+		}
+		double targetQuality =1.5;
+
+		for(int i=0;i < 500000000;i++)
+		{
+			double err=b.learnSet(p);
+
+			if(i%100000==0)
+				srand(time(NULL));
+			if(i%20000==0 || err > targetQuality)
+			{
+				std::cerr << i << " ("<< err <<").\n";
+				for(int j=0;j<4;j++)
+				{
+					std::cerr << "\t" << j%4 << ". FOR: [" << p[j%4]->representation()[0] << "," <<p[j%4]->representation()[1] << "] res: " <<
+						q.solve(*p[j%4])[0] << "\n";
+				}
+			}
+			if(err >targetQuality)
+				break;
+		}
+	}
+}
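The if/else ladder in the quality lambda above is just XOR; a one-line equivalent (same truth table):

	double expect = (p[0] != p[1]) ? 1.0 : 0.0;

The score then rewards outputs beyond the 0.65/0.35 margins, scaled by 5, so the loop stops only once all four cases sit well past the correct side of the margin.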