started moving learning algorithms to the new namespace MachineLearning
Makefile

@@ -7,23 +7,35 @@ all: pre libs
pre:
	@mkdir -p lib

libs: genetics nn
libs: ml genetics nn

test: all
	make -C tests

nn: | nn_build lib/NeuronNetwork.a lib/NeuronNetwork.so

lib/NeuronNetwork.so: ./src/NeuronNetwork/NeuronNetwork.so
	cp ./src/NeuronNetwork/NeuronNetwork.so ./lib/

ml: | ml_build lib/MachineLearning.a lib/MachineLearning.so

lib/NeuronNetwork.a: ./src/NeuronNetwork/NeuronNetwork.a
	cp ./src/NeuronNetwork/NeuronNetwork.a ./lib/
	cp ./src/NeuronNetwork/NeuronNetwork.nm ./lib/

lib/MachineLearning.so: ./src/MachineLearning/MachineLearning.so
	cp ./src/MachineLearning/MachineLearning.so ./lib/

lib/MachineLearning.a: ./src/MachineLearning/MachineLearning.a
	cp ./src/MachineLearning/MachineLearning.a ./lib/
	cp ./src/MachineLearning/MachineLearning.nm ./lib/

ml_build:
	@make -C src/MachineLearning

nn: | nn_build lib/NeuralNetwork.a lib/NeuralNetwork.so

lib/NeuralNetwork.so: ./src/NeuralNetwork/NeuralNetwork.so
	cp ./src/NeuralNetwork/NeuralNetwork.so ./lib/

lib/NeuralNetwork.a: ./src/NeuralNetwork/NeuralNetwork.a
	cp ./src/NeuralNetwork/NeuralNetwork.a ./lib/
	cp ./src/NeuralNetwork/NeuralNetwork.nm ./lib/

nn_build:
	@make -C src/NeuronNetwork
	@make -C src/NeuralNetwork

genetics: | genetics_build lib/Genetics.a lib/Genetics.so
@@ -38,8 +50,9 @@ genetics_build:
	@make -C src/Genetics

clean:
	@make -C src/MachineLearning clean
	@make -C src/Genetics clean
	@make -C src/NeuronNetwork clean
	@make -C src/NeuralNetwork clean
	@make -C tests clean
	#@rm -f ./*.so ./*.a ./*.nm
	@rm -f ./lib/*.so ./lib/*.a ./lib/*.nm
@@ -5,7 +5,7 @@ CXXFLAGS+= -std=c++14
CXXFLAGS+= -pg -fPIC
CXXFLAGS+= -g
CXXFLAGS+= -fPIC -pthread

CXXFLAGS+= -DUSE_SSE
OPTIMALIZATION = -O3 -march=native -mtune=native

%.o : %.cpp %.h
@@ -1,6 +1,6 @@
#include "./IO"

Shin::NeuronNetwork::IO Shin::NeuronNetwork::IO::operator+(const IO &r)
Shin::IO Shin::IO::operator+(const IO &r)
{
Shin::NeuronNetwork::IO tmp;
for(float a:this->data)
@@ -6,13 +6,11 @@

namespace Shin
{
namespace NeuronNetwork
{
class IO
{
public:
IO() {};
IO(std::vector<float> &d) : data(d) {}
IO(const std::vector<float> &d) : data(d) {}
IO(const IO &old) : data(old.data) {}
IO(const std::initializer_list<float> &a):data(a) { }
virtual ~IO() {};
@@ -27,5 +25,4 @@ class IO
private:
};
}
}
#endif
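The two hunks above drop the inner NeuronNetwork namespace around IO, so the I/O wrapper is now addressed simply as Shin::IO (and, further down, Shin::Problem and Shin::Solution follow the same flattening). A minimal call-site sketch of what that looks like, assuming only the constructors and the operator+ declared above (includes omitted, since the header location is still in flux in this commit):

// Hypothetical fragment, not part of the commit.
Shin::IO a({0.5f, 1.0f});   // initializer_list<float> constructor
Shin::IO b(a);              // copy constructor
Shin::IO c = a + b;         // operator+ from IO.cpp above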
src/MachineLearning/Learning.h (new file)

@@ -0,0 +1,35 @@
#ifndef _S_ML_LEARNING_H_
#define _S_ML_LEARNING_H_

#include <cstddef>

namespace Shin
{
namespace MachineLearning
{
const float LearningCoeficient=0.4;
const float DefaultNoiseSize=500;
class Learning
{
    public:
        inline Learning() {};
        inline virtual ~Learning() {};

        inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; };

        inline virtual void allowThreading() final {allowThreads=1;}
        inline virtual void disableThreading() final {allowThreads=0;}

        inline virtual void allowNoise() final {noise=1;}
        inline virtual void disableNoise() final {noise=0;}
        inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; }

    protected:
        float learningCoeficient=Shin::MachineLearning::LearningCoeficient;
        bool allowThreads=0;
        bool noise=0;
        unsigned noiseSize=Shin::MachineLearning::DefaultNoiseSize;
};
}
}
#endif
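Learning here is purely a configuration base for the new namespace: a learning coefficient plus threading and noise switches, with defaults taken from the namespace-level constants. A short sketch of how a concrete learner might sit on top of it; the subclass name and its step() method are made up for illustration and are not part of the commit:

#include "Learning.h"

namespace Shin { namespace MachineLearning {
// Hypothetical subclass, shown only to illustrate the inherited knobs.
class GradientLearner : public Learning
{
    public:
        float step(const float& error)
        {
            // learningCoeficient is the protected member inherited from Learning,
            // defaulting to Shin::MachineLearning::LearningCoeficient (0.4).
            return learningCoeficient*error;
        }
};
} }

// Usage sketch:
//   GradientLearner g;
//   g.setLearningCoeficient(0.1);
//   g.allowNoise();
//   g.setNoiseSize(250);   // "milipercents", per the setter above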
src/MachineLearning/Makefile (new file)

@@ -0,0 +1,24 @@
OBJFILES=\
	QLearning.o

LINKFILES=

LIBNAME=MachineLearning

include ../../Makefile.const

all: lib

lib: $(LIBNAME).so $(LIBNAME).a

$(LIBNAME).so: $(OBJFILES)
	$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so

$(LIBNAME).a: $(OBJFILES) ./Learning.h
	rm -f $(LIBNAME).a # create new library
	ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
	ranlib $(LIBNAME).a
	nm --demangle $(LIBNAME).a > $(LIBNAME).nm

clean:
	@rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o
@@ -3,28 +3,29 @@

#include <map>

#include "../../Solution.h"
#include "../../FeedForward.h"
#include "../BackPropagation.h"
#include "../OpticalBackPropagation.h"
#include "Unsupervised.h"

#include "../Solution.h"
//#include "../FeedForward.h"
//#include "BackPropagation.h"
//#include "OpticalBackPropagation.h"

namespace Shin
{
namespace NeuronNetwork
{
namespace RL
namespace MachineLearning
{
class QFunction
{
public:
QFunction();
virtual ~QFunction();
virtual void learnDelayed(std::vector<std::pair<Solution,Problem>> &p, float quality)=0;
virtual void learn(Solution &s, Problem &p, float quality)=0;
//virtual void learnDelayed(std::vector<std::pair<Solution,Problem>> &p, float quality)=0;
//virtual void learn(Solution &s, Problem &p, float quality)=0;
protected:
float learningCoeficient;
};

/*
class QFunctionTable : public QFunction
{
public:
@@ -83,15 +84,11 @@ namespace RL

virtual int getChoice(Problem &p);
virtual Solution getSolution(Problem &p) {return function->solve(p);}
void setLearningCoeficient(double ok, double err) {learningA=ok;learningB=err;};
void opticalBackPropagation() {delete b; b=new Learning::OpticalBackPropagation(*function);};
private:
Learning::BackPropagation *b;
FeedForward * function;
float learningA=0.05;
float learningB=0.008;
};
}
*/
}
}
src/MachineLearning/QLearning.cpp (new file)

@@ -0,0 +1,32 @@
#include "./QLearning"

void Shin::MachineLearning::QLearning::learnDelayed(std::vector< std::pair< Shin::Problem, int > >& p, float quality)
{
    std::vector<std::pair<Problem,Solution>> q;
    register int solSize=0;
    if(p.size()>0)
        solSize=getSolution(p[0].first).size();
    if (!solSize)
        return;

    for(size_t i=0;i<p.size();i++)
    {
        Solution s;
        for(int j=0;j<solSize;j++)
        {
            s.push_back(j==p[i].second?1:0);
        }
        q.push_back(std::pair<Problem,Solution>(p[i].first,s));
    }
    learnDelayed(q,quality);
}

void Shin::MachineLearning::QLearning::learnDelayed(std::vector< std::pair<Shin::Problem, Shin::Solution> >& p, float quality)
{
    for(int i=p.size()-1;i>=0;i--)
    {
        auto &pair=p[i];
        learn(pair.first,pair.second,quality);
        quality*=0.3;
    }
}
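Both overloads implement delayed (episode-style) reinforcement: the int overload first one-hot encodes each chosen action into a Solution as wide as getSolution() currently returns, and the (Problem, Solution) overload then walks the episode from its last pair backwards, multiplying the reward by 0.3 at each step, so with quality = 1.0 the final action is reinforced with 1.0, the one before it with 0.3, then 0.09, and so on. A hedged sketch of feeding it an episode; the learner object and the feature values are illustrative only:

// Assumes some concrete subclass, e.g. QLearningNetwork from QLearning.h below.
std::vector<std::pair<Shin::Problem,int>> episode;
episode.push_back({Shin::Problem({0.0f, 1.0f}), 2});   // state, chosen action index
episode.push_back({Shin::Problem({1.0f, 1.0f}), 0});
learner.learnDelayed(episode, 1.0f);                    // last pair gets 1.0, the previous one 0.3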
src/MachineLearning/QLearning.h (new file)

@@ -0,0 +1,101 @@
#ifndef _QLEARNING_H_
#define _QLEARNING_H_

#include <cstddef>
#include <map>

#include "Unsupervised.h"
#include "../NeuralNetwork/FeedForward.h"

/*#include "BackPropagation.h"
#include "OpticalBackPropagation.h"
#include "../FeedForward.h"
#include "Unsupervised.h"
#include "QFunction.h"
*/
/*
 * http://www2.econ.iastate.edu/tesfatsi/RLUsersGuide.ICAC2005.pdf
 * http://www.autonlab.org/tutorials/rl06.pdf
 * http://www.nbu.bg/cogs/events/2000/Readings/Petrov/rltutorial.pdf
 *
 * http://www.applied-mathematics.net/qlearning/qlearning.html
 * http://nn.cs.utexas.edu/downloads/papers/stanley.gecco02_1.pdf
 *
 * http://stackoverflow.com/questions/740389/good-implementations-of-reinforced-learning
 *
 * http://stackoverflow.com/questions/10722064/training-a-neural-network-with-reinforcement-learning
 *
 * http://remi.coulom.free.fr/Thesis/
 * http://remi.coulom.free.fr/Publications/Thesis.pdf
 *
 * http://link.springer.com/article/10.1007/BF00992696
 *
 * http://scholar.google.cz/scholar?start=10&q=reinforcement+learning+feedforward&hl=en&as_sdt=0,5&as_vis=1
 *
 */

namespace Shin
{
namespace MachineLearning
{
class QLearning
{
    public:
        inline QLearning() {};
        virtual ~QLearning() {} ;

        QLearning(const QLearning&) =delete;
        QLearning& operator=(const QLearning&) =delete;

        virtual void learnDelayed(std::vector<std::pair<Problem,Solution>> &p, float quality) final;
        virtual void learnDelayed(std::vector<std::pair<Problem,int>> &p, float quality) final;

        virtual void learn(Problem &p,Solution &s, float quality)=0;
        virtual void learn(Problem &p,int action, float quality)=0;

        inline virtual void setLearningCoeficient(const float& a) {setLearningCoeficient(a,a);};
        inline void setLearningCoeficient(const float& ok, const float& err) {learningA=ok;learningB=err;};

        virtual Solution getSolution(Problem &p)=0;
        int getChoice(Problem &p);
    protected:
        float learningA=0.05;
        float learningB=0.008;

};

class QLearningNetwork : public QLearning
{
    public:
        QLearningNetwork(size_t input, size_t size, size_t choices): QLearning(),function({input,size,choices}) {};
        QLearningNetwork(std::initializer_list<size_t> s): QLearning(),function(s) {};

        QLearningNetwork(const QLearningNetwork&)=delete;
        QLearningNetwork operator=(const QLearningNetwork&)=delete;

        virtual void learn(Problem &p,Solution &s, float quality) override;
        virtual void learn(Problem &p,int action, float quality) override;

        virtual Solution getSolution(Problem &p) override {return function.solve(p);}
    protected:
        Shin::NeuralNetwork::FeedForward function;
};

class QLearningTable : public QLearning
{
    public:
        QLearningTable():QLearning(),data() {};

        QLearningTable(const QLearningTable&)=delete;
        QLearningTable operator=(const QLearningTable&)=delete;

        virtual void learn(Problem &p,Solution &s, float quality) override;
        virtual void learn(Problem &p,int action, float quality) override;

        virtual Solution getSolution(Problem &p) override;
    protected:
        std::map<Problem,std::map<int,std::pair<float,int>>> data;
};
}
}
#endif
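QLearningNetwork backs the Q-function with a NeuralNetwork::FeedForward whose output width is the number of actions, while QLearningTable keeps a per-Problem score map; both only have to provide learn() and getSolution(). A minimal usage sketch, with the include path, layer sizes and reward value chosen for illustration:

// Hypothetical caller, mirroring how the tests include headers from ../src.
#include "../src/MachineLearning/QLearning.h"

int main()
{
    // 4 input features, 8 hidden units, 3 possible actions (sizes are illustrative).
    Shin::MachineLearning::QLearningNetwork agent(4, 8, 3);
    agent.setLearningCoeficient(0.05f, 0.008f);     // ok / err coefficients from QLearning

    Shin::Problem state({0.1f, 0.0f, 0.7f, 0.2f});
    int action = agent.getChoice(state);            // declared on QLearning above
    agent.learn(state, action, 1.0f);               // reinforce the chosen action
    return 0;
}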
src/MachineLearning/Unsupervised.h (new file)

@@ -0,0 +1,18 @@
#ifndef _UNSUPERVISEDLEARNING_H_
#define _UNSUPERVISEDLEARNING_H_

#include "./Learning.h"

namespace Shin
{
namespace MachineLearning
{
class Unsupervised : public Learning
{
    public:
        Unsupervised(): Learning() {};
        virtual ~Unsupervised() {};
};
}
}
#endif
@@ -1,6 +1,6 @@
#include "FeedForward"

using namespace Shin::NeuronNetwork;
using namespace Shin::NeuralNetwork;

FFLayer::~FFLayer()
{
@@ -14,7 +14,7 @@ FFLayer::~FFLayer()
}
}

FFNeuron& FFLayer::operator[](size_t neuron)
FFNeuron& FFLayer::operator[](const size_t& neuron)
{
if(neurons==nullptr)
{
@@ -33,7 +33,7 @@ FFNeuron& FFLayer::operator[](size_t neuron)

}

FeedForward::FeedForward(std::initializer_list< int > s, double lam): ACyclicNetwork(lam),layers(s.size())
FeedForward::FeedForward(std::initializer_list<size_t> s, double lam): ACyclicNetwork(lam),layers(s.size())
{
weights= new float**[s.size()];
potentials= new float*[s.size()];
@@ -158,7 +158,7 @@ void FeedForward::solvePart(float *newSolution, register size_t begin, size_t en
}
}

Solution FeedForward::solve(const Problem& p)
Shin::Solution FeedForward::solve(const Shin::Problem& p)
{
register float* sol=sums[0];

@@ -203,7 +203,7 @@ Solution FeedForward::solve(const Problem& p)
return ret;
}

FFLayer& FeedForward::operator[](size_t l)
FFLayer& FeedForward::operator[](const size_t& l)
{
if(ffLayers==nullptr)
{
@@ -1,9 +1,8 @@
#ifndef _S_NN_FF_H_
#define _S_NN_FF_H_

#include "Problem"
#include "Solution"
#include "Neuron"
#include "../Problem"
#include "../Solution"
#include "Network"

#include <vector>
@@ -23,7 +22,7 @@

namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
class FFNeuron : public Neuron
{
@@ -35,14 +34,14 @@ namespace NeuronNetwork
FFNeuron(float &pot, float *w, float &s, float &i,float lam):potential(pot),weights(w),sum(s),inputs(i),lambda(lam) { }

inline virtual float getPotential() const override {return potential;}
inline virtual void setPotential(float p) { potential=p;}
inline virtual void setPotential(const float& p) override { potential=p;}

inline virtual float getWeight(size_t i ) const override { return weights[i];}
inline virtual void setWeight(size_t i,float p) override { weights[i]=p; }
inline virtual float getWeight(const size_t& i ) const override { return weights[i];}
inline virtual void setWeight(const size_t& i,const float &p) override { weights[i]=p; }

inline virtual float output() const { return sum; }
inline virtual float input() const { return inputs; }
inline virtual float derivatedOutput() const { return lambda*output()*(1.0-output()); }
inline virtual float output() const override { return sum; }
inline virtual float input() const override { return inputs; }
inline virtual float derivatedOutput() const override { return lambda*output()*(1.0-output()); }
protected:
float &potential;
float *weights;
@@ -61,7 +60,7 @@ namespace NeuronNetwork
FFLayer(const FFLayer &) = delete;
FFLayer& operator=(const FFLayer &) = delete;

virtual FFNeuron& operator[](size_t layer) override;
virtual FFNeuron& operator[](const size_t& layer) override;
inline virtual size_t size() const override {return layerSize;};
protected:
FFNeuron **neurons=nullptr;
@@ -76,7 +75,7 @@ namespace NeuronNetwork
class FeedForward:public ACyclicNetwork
{
public:
FeedForward(std::initializer_list<int> s, double lam=Shin::NeuronNetwork::lambda);
FeedForward(std::initializer_list<size_t> s, double lam=Shin::NeuralNetwork::lambda);
virtual ~FeedForward();

FeedForward(const FeedForward &f) = delete; //TODO
@@ -84,7 +83,7 @@ namespace NeuronNetwork

virtual Solution solve(const Problem& p) override;
virtual size_t size() const override { return layers;};
virtual FFLayer& operator[](size_t l) override;
virtual FFLayer& operator[](const size_t& l) override;
protected:
void solvePart(float *newSolution, size_t begin, size_t end,size_t prevSize, float* sol,size_t layer);
private:
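The header side mirrors the .cpp changes: layer sizes become size_t, the indexing operators take const size_t&, and the FFNeuron accessors gain explicit override, all now inside namespace NeuralNetwork. None of this changes what a caller writes; a brief fragment against the new signatures (shapes are illustrative):

Shin::NeuralNetwork::FeedForward net({2, 4, 1});   // initializer_list<size_t> constructor
Shin::NeuralNetwork::FFLayer &hidden = net[1];     // operator[](const size_t&)
Shin::NeuralNetwork::FFNeuron &n = hidden[0];
n.setWeight(0, 0.5f);                              // setWeight(const size_t&, const float&)
float y = n.output();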
@@ -1,12 +1,6 @@
#include "./BackPropagation"
#include <thread>

Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForward &n): Supervised(n)
{

}

Shin::NeuronNetwork::Learning::BackPropagation::~BackPropagation()
Shin::NeuralNetwork::Learning::BackPropagation::~BackPropagation()
{
if(deltas!=nullptr)
{
@@ -16,7 +10,7 @@ Shin::NeuronNetwork::Learning::BackPropagation::~BackPropagation()
delete[] deltas;
}

void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::NeuronNetwork::Solution& expectation)
void Shin::NeuralNetwork::Learning::BackPropagation::propagate(const Shin::Solution& expectation)
{

if(deltas==nullptr)
@@ -93,9 +87,9 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro
}

float Shin::NeuronNetwork::Learning::BackPropagation::teach(const Shin::NeuronNetwork::Problem& p, const Shin::NeuronNetwork::Solution& solution)
float Shin::NeuralNetwork::Learning::BackPropagation::teach(const Shin::Problem& p, const Shin::Solution& solution)
{
Shin::NeuronNetwork::Solution a=network.solve(p);
Shin::Solution a=network.solve(p);
double error=calculateError(solution,a);

Solution s;
@@ -2,11 +2,12 @@
#define _BACK_PROPAGATION_H_

#include <math.h>
#include <thread>
#include <cstddef>

#include "../Solution.h"
#include "../../Solution.h"
#include "../FeedForward.h"
#include "Supervised"
#include "Learning.h"

/*
 * http://sydney.edu.au/engineering/it/~comp4302/ann4-3s.pdf
@@ -22,22 +23,24 @@

namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
namespace Learning
{
class BackPropagation : public Supervised
class BackPropagation : public Learning
{
public:
BackPropagation(FeedForward &n);
BackPropagation(FeedForward &n): Learning(), network(n) {}
virtual ~BackPropagation();

BackPropagation(const Shin::NeuronNetwork::Learning::BackPropagation&) =delete;
BackPropagation operator=(const Shin::NeuronNetwork::Learning::BackPropagation&) =delete;
BackPropagation(const Shin::NeuralNetwork::Learning::BackPropagation&) =delete;
BackPropagation operator=(const Shin::NeuralNetwork::Learning::BackPropagation&) =delete;

float teach(const Problem &p,const Solution &solution);
virtual void propagate(const Solution& expectation);

float teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution);
virtual void propagate(const Shin::NeuronNetwork::Solution& expectation);
protected:
FeedForward &network;
inline virtual float correction(const float& expected, const float& computed) { return expected - computed;};

float **deltas=nullptr;
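With Supervised folded into the new Learning base, BackPropagation now stores the network reference itself and is constructed inline, and teach/propagate work against the flattened Shin::Problem and Shin::Solution types; the updated tests further down drive it exactly this way. A condensed training-loop sketch (data set and epoch count are illustrative):

Shin::NeuralNetwork::FeedForward net({2, 3, 1});
Shin::NeuralNetwork::Learning::BackPropagation bp(net);
bp.setLearningCoeficient(0.4);

std::vector<std::pair<Shin::Problem, Shin::Solution>> set;
set.push_back({Shin::Problem({0, 0}), Shin::Solution({0})});
set.push_back({Shin::Problem({1, 0}), Shin::Solution({1})});
set.push_back({Shin::Problem({1, 1}), Shin::Solution({0})});

for (int epoch = 0; epoch < 1000; epoch++)
    bp.teachSet(set);   // accumulates the per-pair error returned by teach()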
src/NeuralNetwork/Learning/Learning.cpp (new file)

@@ -0,0 +1,21 @@
#include "Learning.h"

float Shin::NeuralNetwork::Learning::Learning::calculateError(const Shin::Solution& expectation, const Shin::Solution& solution)
{
    register float a=0;
    for (size_t i=0;i<expectation.size();i++)
    {
        a+=pow(expectation[i]-solution[i],2)/2;
    }
    return a;
}

float Shin::NeuralNetwork::Learning::Learning::teachSet(const std::vector<std::pair<Shin::Problem,Shin::Solution>> &set)
{
    double error=0;
    for (register size_t i=0;i<set.size();i++)
    {
        error+=teach(set[i].first,set[i].second);
    }
    return error;
}
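For reference, calculateError above is the usual half squared error between the expected output e and the computed solution s, and teachSet simply accumulates it over a training set:

E(e, s) = \sum_i (e_i - s_i)^2 / 2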
@@ -1,30 +1,23 @@
#ifndef _SUPERVISEDLEARNING_H_
#define _SUPERVIESDLERANING_H_
#ifndef _S_NN_LEARNING_H_
#define _S_NN_LEARNING_H_

#include <vector>
#include <set>
#include <cstddef>

#include "../Solution.h"
#include "../../Solution.h"
#include "../FeedForward.h"

namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
namespace Learning
{
const float LearningCoeficient=0.4;
class Supervised
class Learning
{
public:
Supervised() =delete;
Supervised(FeedForward &n) : network(n) {};
virtual ~Supervised() {};

float calculateError(const Solution &expectation,const Solution &solution);
virtual float teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution)=0;
virtual float teachSet(const std::vector<std::pair<Problem,Solution>> &set) final;
Learning() {};
inline virtual ~Learning() {};

inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; };

@@ -35,9 +28,12 @@ namespace Learning
inline virtual void disableNoise() final {noise=0;}
inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; }

float calculateError(const Solution &expectation,const Solution &solution);
virtual float teach(const Problem &p,const Solution &solution)=0;
virtual float teachSet(const std::vector<std::pair<Problem,Solution>> &set) final;

protected:
FeedForward &network;
float learningCoeficient=Shin::NeuronNetwork::Learning::LearningCoeficient;
float learningCoeficient=LearningCoeficient;
bool allowThreads=0;
bool noise=0;
unsigned noiseSize=500;
@@ -1,6 +1,6 @@
#include "./OpticalBackPropagation"

float Shin::NeuronNetwork::Learning::OpticalBackPropagation::correction(const float& expected, const float& computed)
float Shin::NeuralNetwork::Learning::OpticalBackPropagation::correction(const float& expected, const float& computed)
{
register float tmp=(expected-computed);
register float ret=1+exp(tmp*tmp);
@@ -10,7 +10,7 @@

namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
namespace Learning
{
@@ -1,12 +1,10 @@
OBJFILES=\
	FeedForward.o\
	Learning/Supervised.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o\
	Learning/Unsupervised.o Learning/Reinforcement.o Learning/RL/QFunction.o Learning/QLearning.o\
	./IO.o
	Learning/Learning.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o

LINKFILES= ../sse_mathfun.o

LIBNAME=NeuronNetwork
LIBNAME=NeuralNetwork

include ../../Makefile.const

@@ -17,7 +15,7 @@ lib: $(LIBNAME).so $(LIBNAME).a
$(LIBNAME).so: $(OBJFILES)
	$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so

$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ./Solution.h ./Problem.h
$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ../Solution.h ../Problem.h
	rm -f $(LIBNAME).a # create new library
	ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
	ranlib $(LIBNAME).a
@@ -1,26 +1,25 @@
#ifndef _S_NN_NN_H_
#define _S_NN_NN_H_

#include "Problem"
#include "Solution"
#include "Neuron"

#include <cstdarg>
#include <vector>
#include <initializer_list>

#include <iostream>
#include "../Problem.h"
#include "../Solution.h"
#include "Neuron.h"

namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
const float lambda=0.8;
class Layer
{
public:
virtual ~Layer() {};
virtual Neuron& operator[](size_t neuron)=0;
virtual Neuron& operator[](const size_t& neuron)=0;
virtual size_t size() const=0;
};

@@ -31,7 +30,7 @@ namespace NeuronNetwork
virtual ~Network() {};

virtual Solution solve(const Problem&)=0;
virtual Layer& operator[](size_t layer)=0;
virtual Layer& operator[](const size_t &layer)=0;
inline float getLambda() const {return lambda;}

inline virtual void setThreads(const unsigned&t) final {threads=t;}
@@ -5,7 +5,7 @@

namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
class Neuron
{
@@ -13,10 +13,10 @@ namespace NeuronNetwork
Neuron() {};
virtual ~Neuron() {};
virtual float getPotential() const =0;
virtual void setPotential(float p) =0;
virtual void setPotential(const float &p) =0;

virtual float getWeight(size_t) const =0;
virtual void setWeight(size_t i,float p) =0;
virtual float getWeight(const size_t&) const =0;
virtual void setWeight(const size_t& i,const float &p) =0;

virtual float output() const =0;
virtual float input() const=0;
@@ -1,38 +0,0 @@
#include "./QLearning"

Shin::NeuronNetwork::Learning::QLearning::QLearning(size_t input, size_t size, size_t choices):fun()
{
fun.initialiseNetwork(input,size,choices);
}

Shin::NeuronNetwork::Learning::QLearning::~QLearning()
{

}

void Shin::NeuronNetwork::Learning::QLearning::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Solution, Shin::NeuronNetwork::Problem > >& p, float quality)
{
fun.learnDelayed(p,quality);
}

void Shin::NeuronNetwork::Learning::QLearning::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Problem,int > >& p, float quality)
{
fun.learnDelayed(p,quality);
}

void Shin::NeuronNetwork::Learning::QLearning::learn(Shin::NeuronNetwork::Solution& s, Shin::NeuronNetwork::Problem& p, float quality)
{
fun.learn(s,p,quality);
}

void Shin::NeuronNetwork::Learning::QLearning::learn(Shin::NeuronNetwork::Problem& s, int action, float quality)
{
fun.learn(s,action,quality);
}

int Shin::NeuronNetwork::Learning::QLearning::getChoice(Shin::NeuronNetwork::Problem& p)
{
return fun.getChoice(p);
}
@@ -1,69 +0,0 @@
#ifndef _QLEARNING_H_
#define _QLEARNING_H_

#include <cstddef>
#include <functional>

#include "BackPropagation.h"
#include "OpticalBackPropagation.h"
#include "../Problem.h"
#include "../FeedForward.h"
#include "Unsupervised.h"
#include "RL/QFunction.h"

/*
 * http://www2.econ.iastate.edu/tesfatsi/RLUsersGuide.ICAC2005.pdf
 * http://www.autonlab.org/tutorials/rl06.pdf
 * http://www.nbu.bg/cogs/events/2000/Readings/Petrov/rltutorial.pdf
 *
 * http://www.applied-mathematics.net/qlearning/qlearning.html
 * http://nn.cs.utexas.edu/downloads/papers/stanley.gecco02_1.pdf
 *
 * http://stackoverflow.com/questions/740389/good-implementations-of-reinforced-learning
 *
 * http://stackoverflow.com/questions/10722064/training-a-neural-network-with-reinforcement-learning
 *
 * http://remi.coulom.free.fr/Thesis/
 * http://remi.coulom.free.fr/Publications/Thesis.pdf
 *
 * http://link.springer.com/article/10.1007/BF00992696
 *
 * http://scholar.google.cz/scholar?start=10&q=reinforcement+learning+feedforward&hl=en&as_sdt=0,5&as_vis=1
 *
 */

namespace Shin
{
namespace NeuronNetwork
{
namespace Learning
{
class QLearning
{
public:
QLearning(size_t input, size_t size, size_t choices);
~QLearning();

QLearning(const QLearning&) =delete;
QLearning& operator=(const QLearning&) =delete;

void learnDelayed(std::vector<std::pair<Solution,Problem>> &p, float quality);
void learnDelayed(std::vector<std::pair<Problem,int>> &p, float quality);
void learn(Solution &s, Problem &p, float quality);
void learn(Problem &p,int action, float quality);

void learnNetwork(double maxError=0.01);
void learningCoeficient(double t);

void initialise(size_t input, size_t size,size_t choices);
int getChoice(Problem &p);
Solution getSolution(Problem &p) {return fun.getSolution(p);}
void setLearningCoeficient(double ok, double err) {fun.setLearningCoeficient(ok,err);};
void opticalBackPropagation() {fun.opticalBackPropagation();};
protected:
RL::QFunctionNetwork fun;
};
}
}
}
#endif
@@ -1 +0,0 @@
./Supervised.h
@@ -1,20 +0,0 @@
#include "./Supervised"
float Shin::NeuronNetwork::Learning::Supervised::calculateError(const Shin::NeuronNetwork::Solution& expectation, const Shin::NeuronNetwork::Solution& solution)
{
register float a=0;
for (size_t i=0;i<expectation.size();i++)
{
a+=pow(expectation[i]-solution[i],2)/2;
}
return a;
}

float Shin::NeuronNetwork::Learning::Supervised::teachSet(const std::vector<std::pair<Shin::NeuronNetwork::Problem,Shin::NeuronNetwork::Solution>> &set)
{
double error=0;
for (register size_t i=0;i<set.size();i++)
{
error+=teach(set[i].first,set[i].second);
}
return error;
}
@@ -1 +0,0 @@
./Unsupervised.h
@@ -1 +0,0 @@
#include "./Unsupervised"
@@ -1,29 +0,0 @@
#ifndef _UNSUPERVISEDLEARNING_H_
#define _UNSUPERVISEDLEARNING_H_

#include <math.h>
#include <cstddef>

#include "../Solution.h"
#include "../FeedForward.h"

namespace Shin
{
namespace NeuronNetwork
{
namespace Learning
{
class Unsupervised
{
public:
Unsupervised(FeedForward &n): network(n) {};
virtual ~Unsupervised() {};

Unsupervised() =delete;
protected:
FeedForward &network;
};
}
}
}
#endif
@@ -6,19 +6,16 @@
#include "IO.h"

namespace Shin
{
namespace NeuronNetwork
{
class Problem : public IO
{
public:
Problem(): IO() {};
Problem(std::vector<float> &p):IO(p) {};
Problem(const std::vector<float> &p):IO(p) {};
Problem(const std::initializer_list<float> &a) : IO(a) {};
protected:
private:
};
}
}
#endif
@@ -5,8 +5,6 @@
#include "IO.h"

namespace Shin
{
namespace NeuronNetwork
{
class Solution : public IO
{
@@ -19,7 +17,6 @@ namespace NeuronNetwork
inline void push_back(const float &a) {data.push_back(a);};
};
}
}

#endif
@@ -16,7 +16,7 @@ NN_TESTS= $(NN_TESTEABLE) nn-pong

ALL_TESTS=$(NN_TESTEABLE) $(GEN_TESTS)

LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuronNetwork.a
LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a
#LIBS=-lGenetics.so -lNeuronNetwork

CXXFLAGS += -I$(LIB_DIR)
@@ -30,10 +30,10 @@ test: all
	@for i in $(ALL_TESTS); do echo -n ./$$i; echo -n " - "; ./$$i; echo ""; done

g-%: g-%.cpp $(LIB_DIR)/Genetics.a
	$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -o $@ $< $ $(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuronNetwork.a -lm
	$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -o $@ $< $ $(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a -lm

nn-%: nn-%.cpp $(LIB_DIR)/NeuronNetwork.a
	$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuronNetwork.a -lm
nn-%: nn-%.cpp $(LIB_DIR)/NeuralNetwork.a
	$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuralNetwork.a -lm

nn-pong: ./nn-pong.cpp $(LIB_DIR)/NeuronNetwork.a
	$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuronNetwork.a -lm -lalleg -lGL
@@ -1,13 +1,12 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

//typedef Shin::NeuronNetwork::Problem X;

class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
@@ -17,18 +16,18 @@ class X: public Shin::NeuronNetwork::Problem
int main(int argc,char**)
{
srand(time(NULL));
std::vector<Shin::NeuronNetwork::Solution> s;
std::vector<Shin::Solution> s;
std::vector<X> p;

//
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({1})));
s.push_back(Shin::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<bool>({0})));

s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<bool>({1})));

Shin::NeuronNetwork::FeedForward q({1,5000,5000,15000,2});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
Shin::NeuralNetwork::FeedForward q({1,5000,5000,15000,2});
Shin::NeuralNetwork::Learning::BackPropagation b(q);
if(argc > 1)
{
std::cerr << "THREADING\n";
@@ -1,10 +1,9 @@

#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward.h"
#include "../src/NeuralNetwork/FeedForward"

#include <iostream>

class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
protected:
std::vector<float> representation() const
@@ -15,8 +14,8 @@ class X: public Shin::NeuronNetwork::Problem

int main()
{
Shin::NeuronNetwork::FeedForward n({2,4,2});
Shin::NeuronNetwork::FeedForward nq({2,4,2});
Shin::NeuralNetwork::FeedForward n({2,4,2});
Shin::NeuralNetwork::FeedForward nq({2,4,2});
if(n[1].size() != 4)
{
std::cout << "Actual size:" << n[0].size();
@@ -34,8 +33,8 @@ int main()
std::cout << "Potential: " << n[2][0].getPotential() << "\n";
std::cout << "Potential: " << nq[2][0].getPotential() << "\n";

Shin::NeuronNetwork::Solution s =n.solve(X());
Shin::NeuronNetwork::Solution sq =nq.solve(X());
Shin::Solution s =n.solve(X());
Shin::Solution sq =nq.solve(X());

if(s.size()!=2)
{
@@ -1,11 +1,10 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(),q(a.q) {}
@@ -20,21 +19,21 @@ class X: public Shin::NeuronNetwork::Problem

int main()
{
std::vector<Shin::NeuronNetwork::Solution> s;
std::vector<Shin::Solution> s;
std::vector<X> p;

//
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({1,0})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({0,1})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({0,0})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({1})));
s.push_back(Shin::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<float>({1,1})));

Shin::NeuronNetwork::FeedForward q({2,4,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
Shin::NeuralNetwork::FeedForward q({2,4,1});
Shin::NeuralNetwork::Learning::BackPropagation b(q);
b.setLearningCoeficient(10);

for(int i=0;i<4;i++)
@@ -1,7 +1,7 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuralNetwork/FeedForward"

#include <iostream>
class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public: X(bool x,bool y):Problem() {data.push_back(x);data.push_back(y);}
};
@@ -10,7 +10,7 @@ int main()
{
srand(time(NULL));
int lm=5;
Shin::NeuronNetwork::FeedForward net({2,lm,1});
Shin::NeuralNetwork::FeedForward net({2,lm,1});
bool x=1;
int prev_err=0;
int err=0;
@@ -47,7 +47,7 @@ int main()
{
bool x= rand()%2;
bool y=rand()%2;
Shin::NeuronNetwork::Solution s =net.solve(X(x,y));
Shin::Solution s =net.solve(X(x,y));
if(s[0]!= (x xor y))
err++;
}
@@ -1,38 +1,31 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :q(a.q) {}
X(const std::vector<float> &a):q(a) {}
std::vector<float> representation() const
{
return q;
}
protected:
std::vector<float> q;
X(const X& a) :Problem(a.data) {}
X(const std::vector<float> &a):Problem(a) {}
};

int main(int argc, char**)
{
srand(time(NULL));
std::vector<Shin::NeuronNetwork::Solution> s;
std::vector<Shin::Solution> s;
std::vector<X> p;

//
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({1})));
s.push_back(Shin::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<float>({0})));

s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({1})));

Shin::NeuronNetwork::FeedForward q({1,5000,5000,5000,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
Shin::NeuralNetwork::FeedForward q({1,5000,5000,5000,1});
Shin::NeuralNetwork::Learning::BackPropagation b(q);

if(argc >1)
{
@@ -42,6 +35,6 @@ int main(int argc, char**)
for(int i=0;i<2;i++)
{
b.teach(p[i%2],s[i%2]);
std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
std::cerr << i%2 <<". FOR: [" << p[i%2][0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
}
}
@@ -1,10 +1,10 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
@@ -17,14 +17,14 @@ int main()

for (int test=0;test<2;test++)
{
Shin::NeuronNetwork::FeedForward q({2,3,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
Shin::NeuralNetwork::FeedForward q({2,3,1});
Shin::NeuralNetwork::Learning::BackPropagation b(q);

std::vector<std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution> > set;
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({0,0}),Shin::NeuronNetwork::Solution({0})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({1,0}),Shin::NeuronNetwork::Solution({1})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({1,1}),Shin::NeuronNetwork::Solution({0})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({0,1}),Shin::NeuronNetwork::Solution({1})));
std::vector<std::pair<Shin::Problem, Shin::Solution> > set;
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,0}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,0}),Shin::Solution({1})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,1}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,1}),Shin::Solution({1})));
if(test)
{
std::cerr << "Testing with entropy\n";
@@ -1,10 +1,10 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/OpticalBackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/OpticalBackPropagation"

#include <iostream>
#include <vector>

class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
@@ -16,15 +16,15 @@ int main()
srand(time(NULL));
for (int test=0;test<2;test++)
{
Shin::NeuronNetwork::FeedForward q({2,40,1});
Shin::NeuronNetwork::Learning::OpticalBackPropagation b(q);
Shin::NeuralNetwork::FeedForward q({2,40,1});
Shin::NeuralNetwork::Learning::OpticalBackPropagation b(q);
b.setLearningCoeficient(0.1);

std::vector<std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution> > set;
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({0,0}),Shin::NeuronNetwork::Solution({0})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({1,0}),Shin::NeuronNetwork::Solution({1})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({1,1}),Shin::NeuronNetwork::Solution({0})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({0,1}),Shin::NeuronNetwork::Solution({1})));
std::vector<std::pair<Shin::Problem, Shin::Solution> > set;
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,0}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,0}),Shin::Solution({1})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,1}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,1}),Shin::Solution({1})));
if(test)
{
std::cerr << "Testing with entropy\n";
@@ -1,13 +1,12 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

//typedef Shin::NeuronNetwork::Problem X;

class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
@@ -17,21 +16,21 @@ class X: public Shin::NeuronNetwork::Problem
int main(int argc,char**)
{
srand(time(NULL));
std::vector<Shin::NeuronNetwork::Solution> s;
std::vector<Shin::Solution> s;
std::vector<X> p;

p.push_back(X(std::vector<float>({0,0})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.4,0.3,0.2,0.1})));
s.push_back(Shin::Solution(std::vector<float>({0.4,0.3,0.2,0.1})));
p.push_back(X(std::vector<float>({0,0.5})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.6,0.3,0.2,0.5})));
s.push_back(Shin::Solution(std::vector<float>({0.6,0.3,0.2,0.5})));
p.push_back(X(std::vector<float>({0.4,0.5})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.4,0.4,0.2,0.8})));
Shin::NeuronNetwork::FeedForward q({2,4,4,4},1.0);
Shin::NeuronNetwork::Learning::BackPropagation bp(q);
s.push_back(Shin::Solution(std::vector<float>({0.4,0.4,0.2,0.8})));
Shin::NeuralNetwork::FeedForward q({2,4,4,4},1.0);
Shin::NeuralNetwork::Learning::BackPropagation bp(q);
bp.setLearningCoeficient(0.2);
for(int i=0;i<3;i++)
{
Shin::NeuronNetwork::Solution sp =q.solve(p[i]);
Shin::Solution sp =q.solve(p[i]);
std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
}
for(int i=0;i<4;i++)
@@ -44,7 +43,7 @@ int main(int argc,char**)
std::cerr << "XXXXXXXXXXXX\n";
for(int i=0;i<3;i++)
{
Shin::NeuronNetwork::Solution sp =q.solve(p[i]);
Shin::Solution sp =q.solve(p[i]);
std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
}
}