started moving learning algos to new namespace MachineLearning

2014-12-12 00:01:46 +01:00
parent 2736ede1be
commit b4bee6f498
53 changed files with 405 additions and 354 deletions

View File

@@ -1,6 +1,6 @@
#include "./IO"
Shin::NeuronNetwork::IO Shin::NeuronNetwork::IO::operator+(const IO &r)
Shin::IO Shin::IO::operator+(const IO &r)
{
Shin::NeuronNetwork::IO tmp;
for(float a:this->data)

View File

@@ -6,13 +6,11 @@
namespace Shin
{
namespace NeuronNetwork
{
class IO
{
public:
IO() {};
IO(std::vector<float> &d) : data(d) {}
IO(const std::vector<float> &d) : data(d) {}
IO(const IO &old) : data(old.data) {}
IO(const std::initializer_list<float> &a):data(a) { }
virtual ~IO() {};
@@ -27,5 +25,4 @@ class IO
private:
};
}
}
#endif

View File

@@ -0,0 +1,35 @@
#ifndef _S_ML_LEARNING_H_
#define _S_ML_LEARNING_H_
#include <cstddef>
namespace Shin
{
namespace MachineLearning
{
const float LearningCoeficient=0.4;
const float DefaultNoiseSize=500;
class Learning
{
public:
inline Learning() {};
inline virtual ~Learning() {};
inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; };
inline virtual void allowThreading() final {allowThreads=1;}
inline virtual void disableThreading() final {allowThreads=0;}
inline virtual void allowNoise() final {noise=1;}
inline virtual void disableNoise() final {noise=0;}
inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; }
protected:
float learningCoeficient=Shin::MachineLearning::LearningCoeficient;
bool allowThreads=0;
bool noise=0;
unsigned noiseSize=Shin::MachineLearning::DefaultNoiseSize;
};
}
}
#endif
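The new Shin::MachineLearning::Learning base class carries only tuning state (learning coefficient, threading and noise flags); concrete learners are expected to read these protected fields. A minimal sketch of such a subclass, assuming the header resolves as ./Learning.h — the GradientStep class and its step() method are illustrative and not part of this commit:

#include "./Learning.h"
#include <cstdlib>

class GradientStep : public Shin::MachineLearning::Learning
{
public:
	float step(float gradient)
	{
		float delta = learningCoeficient * gradient;  // inherited coefficient, default 0.4
		if (noise)
		{
			// noiseSize is in "milipercents": the default 500 reads as +/-0.5% jitter (assumption)
			int n = static_cast<int>(noiseSize);
			float jitter = (std::rand() % (2 * n + 1) - n) / 100000.0f;
			delta *= 1.0f + jitter;
		}
		return delta;
	}
};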

View File

@@ -0,0 +1,24 @@
OBJFILES=\
QLearning.o
LINKFILES=
LIBNAME=MachineLearning
include ../../Makefile.const
all: lib
lib: $(LIBNAME).so $(LIBNAME).a
$(LIBNAME).so: $(OBJFILES)
$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so
$(LIBNAME).a: $(OBJFILES) ./Learning.h
rm -f $(LIBNAME).a # create new library
ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
ranlib $(LIBNAME).a
nm --demangle $(LIBNAME).a > $(LIBNAME).nm
clean:
@rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o

View File

@@ -3,28 +3,29 @@
#include <map>
#include "../../Solution.h"
#include "../../FeedForward.h"
#include "../BackPropagation.h"
#include "../OpticalBackPropagation.h"
#include "Unsupervised.h"
#include "../Solution.h"
//#include "../FeedForward.h"
//#include "BackPropagation.h"
//#include "OpticalBackPropagation.h"
namespace Shin
{
namespace NeuronNetwork
{
namespace RL
namespace MachineLearning
{
class QFunction
{
public:
QFunction();
virtual ~QFunction();
virtual void learnDelayed(std::vector<std::pair<Solution,Problem>> &p, float quality)=0;
virtual void learn(Solution &s, Problem &p, float quality)=0;
//virtual void learnDelayed(std::vector<std::pair<Solution,Problem>> &p, float quality)=0;
//virtual void learn(Solution &s, Problem &p, float quality)=0;
protected:
float learningCoeficient;
};
/*
class QFunctionTable : public QFunction
{
public:
@@ -83,15 +84,11 @@ namespace RL
virtual int getChoice(Problem &p);
virtual Solution getSolution(Problem &p) {return function->solve(p);}
void setLearningCoeficient(double ok, double err) {learningA=ok;learningB=err;};
void opticalBackPropagation() {delete b; b=new Learning::OpticalBackPropagation(*function);};
private:
Learning::BackPropagation *b;
FeedForward * function;
float learningA=0.05;
float learningB=0.008;
};
}
*/
}
}

View File

@@ -0,0 +1,32 @@
#include "./QLearning"
void Shin::MachineLearning::QLearning::learnDelayed(std::vector< std::pair< Shin::Problem, int > >& p, float quality)
{
std::vector<std::pair<Problem,Solution>> q;
register int solSize=0;
if(p.size()>0)
solSize=getSolution(p[0].first).size();
if (!solSize)
return;
for(size_t i=0;i<p.size();i++)
{
Solution s;
for(int j=0;j<solSize;j++)
{
s.push_back(j==p[i].second?1:0);
}
q.push_back(std::pair<Problem,Solution>(p[i].first,s));
}
learnDelayed(q,quality);
}
void Shin::MachineLearning::QLearning::learnDelayed(std::vector< std::pair<Shin::Problem, Shin::Solution> >& p, float quality)
{
for(int i=p.size()-1;i>=0;i--)
{
auto &pair=p[i];
learn(pair.first,pair.second,quality);
quality*=0.3;
}
}
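Both learnDelayed() overloads spread a single reward over a whole episode: the integer overload first expands each chosen action index into a one-hot Solution, then the (Problem, Solution) overload walks the pairs from last to first, multiplying the reward by a fixed 0.3 per step so earlier actions receive geometrically less credit. A sketch of the resulting schedule for a starting quality of 1.0 (illustration only, not code from this commit):

#include <cstdio>

int main()
{
	float quality = 1.0f;
	for (int pair = 2; pair >= 0; --pair)  // pairs are visited last-to-first
	{
		std::printf("pair %d learns with quality %.2f\n", pair, quality);
		quality *= 0.3f;                   // same fixed decay as in learnDelayed()
	}
}

With three recorded pairs this prints 1.00, 0.30 and 0.09.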

View File

@@ -0,0 +1,101 @@
#ifndef _QLEARNING_H_
#define _QLEARNING_H_
#include <cstddef>
#include <map>
#include "Unsupervised.h"
#include "../NeuralNetwork/FeedForward.h"
/*#include "BackPropagation.h"
#include "OpticalBackPropagation.h"
#include "../FeedForward.h"
#include "Unsupervised.h"
#include "QFunction.h"
*/
/*
* http://www2.econ.iastate.edu/tesfatsi/RLUsersGuide.ICAC2005.pdf
* http://www.autonlab.org/tutorials/rl06.pdf
* http://www.nbu.bg/cogs/events/2000/Readings/Petrov/rltutorial.pdf
*
* http://www.applied-mathematics.net/qlearning/qlearning.html
* http://nn.cs.utexas.edu/downloads/papers/stanley.gecco02_1.pdf
*
* http://stackoverflow.com/questions/740389/good-implementations-of-reinforced-learning
*
* http://stackoverflow.com/questions/10722064/training-a-neural-network-with-reinforcement-learning
*
* http://remi.coulom.free.fr/Thesis/
* http://remi.coulom.free.fr/Publications/Thesis.pdf
*
* http://link.springer.com/article/10.1007/BF00992696
*
* http://scholar.google.cz/scholar?start=10&q=reinforcement+learning+feedforward&hl=en&as_sdt=0,5&as_vis=1
*
*/
namespace Shin
{
namespace MachineLearning
{
class QLearning
{
public:
inline QLearning() {};
virtual ~QLearning() {} ;
QLearning(const QLearning&) =delete;
QLearning& operator=(const QLearning&) =delete;
virtual void learnDelayed(std::vector<std::pair<Problem,Solution>> &p, float quality) final;
virtual void learnDelayed(std::vector<std::pair<Problem,int>> &p, float quality) final;
virtual void learn(Problem &p,Solution &s, float quality)=0;
virtual void learn(Problem &p,int action, float quality)=0;
inline virtual void setLearningCoeficient(const float& a) {setLearningCoeficient(a,a);};
inline void setLearningCoeficient(const float& ok, const float& err) {learningA=ok;learningB=err;};
virtual Solution getSolution(Problem &p)=0;
int getChoice(Problem &p);
protected:
float learningA=0.05;
float learningB=0.008;
};
class QLearningNetwork : public QLearning
{
public:
QLearningNetwork(size_t input, size_t size, size_t choices): QLearning(),function({input,size,choices}) {};
QLearningNetwork(std::initializer_list<size_t> s): QLearning(),function(s) {};
QLearningNetwork(const QLearningNetwork&)=delete;
QLearningNetwork operator=(const QLearningNetwork&)=delete;
virtual void learn(Problem &p,Solution &s, float quality) override;
virtual void learn(Problem &p,int action, float quality) override;
virtual Solution getSolution(Problem &p) override {return function.solve(p);}
protected:
Shin::NeuralNetwork::FeedForward function;
};
class QLearningTable : public QLearning
{
public:
QLearningTable():QLearning(),data() {};
QLearningTable(const QLearningTable&)=delete;
QLearningTable operator=(const QLearningTable&)=delete;
virtual void learn(Problem &p,Solution &s, float quality) override;
virtual void learn(Problem &p,int action, float quality) override;
virtual Solution getSolution(Problem &p) override;
protected:
std::map<Problem,std::map<int,std::pair<float,int>>> data;
};
}
}
#endif
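QLearningNetwork replaces the removed Learning::QLearning wrapper: it owns a FeedForward directly instead of delegating to RL::QFunctionNetwork. A minimal usage sketch, assuming the include paths of this tree; the layer sizes and reward rule are made up for illustration:

#include "QLearning.h"

int main()
{
	// 2 inputs, 4 hidden neurons, 2 possible actions
	Shin::MachineLearning::QLearningNetwork agent(2, 4, 2);

	Shin::Problem state{0.0f, 1.0f};
	int action = agent.getChoice(state);          // best-rated action for this state

	float reward = (action == 1) ? 1.0f : -1.0f;  // hypothetical environment feedback
	agent.learn(state, action, reward);           // reinforce or punish that choice
}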

View File

@@ -0,0 +1,18 @@
#ifndef _UNSUPERVISEDLEARNING_H_
#define _UNSUPERVISEDLEARNING_H_
#include "./Learning.h"
namespace Shin
{
namespace MachineLearning
{
class Unsupervised : public Learning
{
public:
Unsupervised(): Learning() {};
virtual ~Unsupervised() {};
};
}
}
#endif

View File

@@ -1,6 +1,6 @@
#include "FeedForward"
using namespace Shin::NeuronNetwork;
using namespace Shin::NeuralNetwork;
FFLayer::~FFLayer()
{
@@ -14,7 +14,7 @@ FFLayer::~FFLayer()
}
}
FFNeuron& FFLayer::operator[](size_t neuron)
FFNeuron& FFLayer::operator[](const size_t& neuron)
{
if(neurons==nullptr)
{
@@ -33,7 +33,7 @@ FFNeuron& FFLayer::operator[](size_t neuron)
}
FeedForward::FeedForward(std::initializer_list< int > s, double lam): ACyclicNetwork(lam),layers(s.size())
FeedForward::FeedForward(std::initializer_list<size_t> s, double lam): ACyclicNetwork(lam),layers(s.size())
{
weights= new float**[s.size()];
potentials= new float*[s.size()];
@@ -158,7 +158,7 @@ void FeedForward::solvePart(float *newSolution, register size_t begin, size_t en
}
}
Solution FeedForward::solve(const Problem& p)
Shin::Solution FeedForward::solve(const Shin::Problem& p)
{
register float* sol=sums[0];
@@ -203,7 +203,7 @@ Solution FeedForward::solve(const Problem& p)
return ret;
}
FFLayer& FeedForward::operator[](size_t l)
FFLayer& FeedForward::operator[](const size_t& l)
{
if(ffLayers==nullptr)
{

View File

@@ -1,9 +1,8 @@
#ifndef _S_NN_FF_H_
#define _S_NN_FF_H_
#include "Problem"
#include "Solution"
#include "Neuron"
#include "../Problem"
#include "../Solution"
#include "Network"
#include <vector>
@@ -23,7 +22,7 @@
namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
class FFNeuron : public Neuron
{
@@ -35,14 +34,14 @@ namespace NeuronNetwork
FFNeuron(float &pot, float *w, float &s, float &i,float lam):potential(pot),weights(w),sum(s),inputs(i),lambda(lam) { }
inline virtual float getPotential() const override {return potential;}
inline virtual void setPotential(float p) { potential=p;}
inline virtual void setPotential(const float& p) override { potential=p;}
inline virtual float getWeight(size_t i ) const override { return weights[i];}
inline virtual void setWeight(size_t i,float p) override { weights[i]=p; }
inline virtual float getWeight(const size_t& i ) const override { return weights[i];}
inline virtual void setWeight(const size_t& i,const float &p) override { weights[i]=p; }
inline virtual float output() const { return sum; }
inline virtual float input() const { return inputs; }
inline virtual float derivatedOutput() const { return lambda*output()*(1.0-output()); }
inline virtual float output() const override { return sum; }
inline virtual float input() const override { return inputs; }
inline virtual float derivatedOutput() const override { return lambda*output()*(1.0-output()); }
protected:
float &potential;
float *weights;
@@ -61,7 +60,7 @@ namespace NeuronNetwork
FFLayer(const FFLayer &) = delete;
FFLayer& operator=(const FFLayer &) = delete;
virtual FFNeuron& operator[](size_t layer) override;
virtual FFNeuron& operator[](const size_t& layer) override;
inline virtual size_t size() const override {return layerSize;};
protected:
FFNeuron **neurons=nullptr;
@@ -76,7 +75,7 @@ namespace NeuronNetwork
class FeedForward:public ACyclicNetwork
{
public:
FeedForward(std::initializer_list<int> s, double lam=Shin::NeuronNetwork::lambda);
FeedForward(std::initializer_list<size_t> s, double lam=Shin::NeuralNetwork::lambda);
virtual ~FeedForward();
FeedForward(const FeedForward &f) = delete; //TODO
@@ -84,7 +83,7 @@ namespace NeuronNetwork
virtual Solution solve(const Problem& p) override;
virtual size_t size() const override { return layers;};
virtual FFLayer& operator[](size_t l) override;
virtual FFLayer& operator[](const size_t& l) override;
protected:
void solvePart(float *newSolution, size_t begin, size_t end,size_t prevSize, float* sol,size_t layer);
private:

View File

@@ -1,12 +1,6 @@
#include "./BackPropagation"
#include <thread>
Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForward &n): Supervised(n)
{
}
Shin::NeuronNetwork::Learning::BackPropagation::~BackPropagation()
Shin::NeuralNetwork::Learning::BackPropagation::~BackPropagation()
{
if(deltas!=nullptr)
{
@@ -16,7 +10,7 @@ Shin::NeuronNetwork::Learning::BackPropagation::~BackPropagation()
delete[] deltas;
}
void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::NeuronNetwork::Solution& expectation)
void Shin::NeuralNetwork::Learning::BackPropagation::propagate(const Shin::Solution& expectation)
{
if(deltas==nullptr)
@@ -93,9 +87,9 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro
}
float Shin::NeuronNetwork::Learning::BackPropagation::teach(const Shin::NeuronNetwork::Problem& p, const Shin::NeuronNetwork::Solution& solution)
float Shin::NeuralNetwork::Learning::BackPropagation::teach(const Shin::Problem& p, const Shin::Solution& solution)
{
Shin::NeuronNetwork::Solution a=network.solve(p);
Shin::Solution a=network.solve(p);
double error=calculateError(solution,a);
Solution s;

View File

@@ -2,11 +2,12 @@
#define _BACK_PROPAGATION_H_
#include <math.h>
#include <thread>
#include <cstddef>
#include "../Solution.h"
#include "../../Solution.h"
#include "../FeedForward.h"
#include "Supervised"
#include "Learning.h"
/*
* http://sydney.edu.au/engineering/it/~comp4302/ann4-3s.pdf
@@ -22,22 +23,24 @@
namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
namespace Learning
{
class BackPropagation : public Supervised
class BackPropagation : public Learning
{
public:
BackPropagation(FeedForward &n);
BackPropagation(FeedForward &n): Learning(), network(n) {}
virtual ~BackPropagation();
BackPropagation(const Shin::NeuronNetwork::Learning::BackPropagation&) =delete;
BackPropagation operator=(const Shin::NeuronNetwork::Learning::BackPropagation&) =delete;
BackPropagation(const Shin::NeuralNetwork::Learning::BackPropagation&) =delete;
BackPropagation operator=(const Shin::NeuralNetwork::Learning::BackPropagation&) =delete;
float teach(const Problem &p,const Solution &solution);
virtual void propagate(const Solution& expectation);
float teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution);
virtual void propagate(const Shin::NeuronNetwork::Solution& expectation);
protected:
FeedForward &network;
inline virtual float correction(const float& expected, const float& computed) { return expected - computed;};
float **deltas=nullptr;
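With Supervised folded into the new Learning base, BackPropagation now takes the target network in an inline constructor and inherits the teach()/teachSet() bookkeeping. A minimal training-step sketch, assuming include paths relative to this tree and the Solution::push_back() shown in Solution.h (network sizes are illustrative):

#include "NeuralNetwork/FeedForward.h"
#include "NeuralNetwork/Learning/BackPropagation.h"

int main()
{
	Shin::NeuralNetwork::FeedForward net({2, 3, 1});         // 2 inputs, 3 hidden, 1 output
	Shin::NeuralNetwork::Learning::BackPropagation bp(net);

	Shin::Problem p{0.0f, 1.0f};
	Shin::Solution want;
	want.push_back(1.0f);                                    // desired output for this input

	float error = bp.teach(p, want);                         // one backpropagation pass
	(void)error;
}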

View File

@@ -0,0 +1,21 @@
#include "Learning.h"
float Shin::NeuralNetwork::Learning::Learning::calculateError(const Shin::Solution& expectation, const Shin::Solution& solution)
{
register float a=0;
for (size_t i=0;i<expectation.size();i++)
{
a+=pow(expectation[i]-solution[i],2)/2;
}
return a;
}
float Shin::NeuralNetwork::Learning::Learning::teachSet(const std::vector<std::pair<Shin::Problem,Shin::Solution>> &set)
{
double error=0;
for (register size_t i=0;i<set.size();i++)
{
error+=teach(set[i].first,set[i].second);
}
return error;
}
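calculateError() is the usual half sum of squared differences, E = sum((expectation[i] - solution[i])^2) / 2, and teachSet() accumulates that error over the whole training set. As a quick sanity check: with expectation {1, 0} and network output {0.8, 0.2} the function returns (0.2^2 + 0.2^2) / 2 = 0.04.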

View File

@@ -1,30 +1,23 @@
#ifndef _SUPERVISEDLEARNING_H_
#define _SUPERVIESDLERANING_H_
#ifndef _S_NN_LEARNING_H_
#define _S_NN_LEARNING_H_
#include <vector>
#include <set>
#include <cstddef>
#include "../Solution.h"
#include "../../Solution.h"
#include "../FeedForward.h"
namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
namespace Learning
{
const float LearningCoeficient=0.4;
class Supervised
class Learning
{
public:
Supervised() =delete;
Supervised(FeedForward &n) : network(n) {};
virtual ~Supervised() {};
float calculateError(const Solution &expectation,const Solution &solution);
virtual float teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution)=0;
virtual float teachSet(const std::vector<std::pair<Problem,Solution>> &set) final;
Learning() {};
inline virtual ~Learning() {};
inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; };
@@ -35,9 +28,12 @@ namespace Learning
inline virtual void disableNoise() final {noise=0;}
inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; }
float calculateError(const Solution &expectation,const Solution &solution);
virtual float teach(const Problem &p,const Solution &solution)=0;
virtual float teachSet(const std::vector<std::pair<Problem,Solution>> &set) final;
protected:
FeedForward &network;
float learningCoeficient=Shin::NeuronNetwork::Learning::LearningCoeficient;
float learningCoeficient=LearningCoeficient;
bool allowThreads=0;
bool noise=0;
unsigned noiseSize=500;

View File

@@ -1,6 +1,6 @@
#include "./OpticalBackPropagation"
float Shin::NeuronNetwork::Learning::OpticalBackPropagation::correction(const float& expected, const float& computed)
float Shin::NeuralNetwork::Learning::OpticalBackPropagation::correction(const float& expected, const float& computed)
{
register float tmp=(expected-computed);
register float ret=1+exp(tmp*tmp);
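The hunk is cut off here, but the visible lines already show the idea of the optical variant: the correction carries a factor 1 + exp(tmp^2) that grows sharply with the error, presumably multiplied into the returned value (the rest of the function is not shown), so badly wrong outputs are pushed much harder than in plain backpropagation. Numerically, for tmp = 0.1 the factor is 1 + e^0.01 ≈ 2.01, while for tmp = 1.0 it is 1 + e ≈ 3.72.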

View File

@@ -10,7 +10,7 @@
namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
namespace Learning
{

View File

@@ -1,12 +1,10 @@
OBJFILES=\
FeedForward.o\
Learning/Supervised.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o\
Learning/Unsupervised.o Learning/Reinforcement.o Learning/RL/QFunction.o Learning/QLearning.o\
./IO.o
Learning/Learning.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o
LINKFILES= ../sse_mathfun.o
LIBNAME=NeuronNetwork
LIBNAME=NeuralNetwork
include ../../Makefile.const
@@ -17,7 +15,7 @@ lib: $(LIBNAME).so $(LIBNAME).a
$(LIBNAME).so: $(OBJFILES)
$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so
$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ./Solution.h ./Problem.h
$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ../Solution.h ../Problem.h
rm -f $(LIBNAME).a # create new library
ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
ranlib $(LIBNAME).a

View File

@@ -1,26 +1,25 @@
#ifndef _S_NN_NN_H_
#define _S_NN_NN_H_
#include "Problem"
#include "Solution"
#include "Neuron"
#include <cstdarg>
#include <vector>
#include <initializer_list>
#include <iostream>
#include "../Problem.h"
#include "../Solution.h"
#include "Neuron.h"
namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
const float lambda=0.8;
class Layer
{
public:
virtual ~Layer() {};
virtual Neuron& operator[](size_t neuron)=0;
virtual Neuron& operator[](const size_t& neuron)=0;
virtual size_t size() const=0;
};
@@ -31,7 +30,7 @@ namespace NeuronNetwork
virtual ~Network() {};
virtual Solution solve(const Problem&)=0;
virtual Layer& operator[](size_t layer)=0;
virtual Layer& operator[](const size_t &layer)=0;
inline float getLambda() const {return lambda;}
inline virtual void setThreads(const unsigned&t) final {threads=t;}

View File

@@ -5,7 +5,7 @@
namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
class Neuron
{
@@ -13,10 +13,10 @@ namespace NeuronNetwork
Neuron() {};
virtual ~Neuron() {};
virtual float getPotential() const =0;
virtual void setPotential(float p) =0;
virtual void setPotential(const float &p) =0;
virtual float getWeight(size_t) const =0;
virtual void setWeight(size_t i,float p) =0;
virtual float getWeight(const size_t&) const =0;
virtual void setWeight(const size_t& i,const float &p) =0;
virtual float output() const =0;
virtual float input() const=0;

View File

@@ -1,38 +0,0 @@
#include "./QLearning"
Shin::NeuronNetwork::Learning::QLearning::QLearning(size_t input, size_t size, size_t choices):fun()
{
fun.initialiseNetwork(input,size,choices);
}
Shin::NeuronNetwork::Learning::QLearning::~QLearning()
{
}
void Shin::NeuronNetwork::Learning::QLearning::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Solution, Shin::NeuronNetwork::Problem > >& p, float quality)
{
fun.learnDelayed(p,quality);
}
void Shin::NeuronNetwork::Learning::QLearning::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Problem,int > >& p, float quality)
{
fun.learnDelayed(p,quality);
}
void Shin::NeuronNetwork::Learning::QLearning::learn(Shin::NeuronNetwork::Solution& s, Shin::NeuronNetwork::Problem& p, float quality)
{
fun.learn(s,p,quality);
}
void Shin::NeuronNetwork::Learning::QLearning::learn(Shin::NeuronNetwork::Problem& s, int action, float quality)
{
fun.learn(s,action,quality);
}
int Shin::NeuronNetwork::Learning::QLearning::getChoice(Shin::NeuronNetwork::Problem& p)
{
return fun.getChoice(p);
}

View File

@@ -1,69 +0,0 @@
#ifndef _QLEARNING_H_
#define _QLEARNING_H_
#include <cstddef>
#include <functional>
#include "BackPropagation.h"
#include "OpticalBackPropagation.h"
#include "../Problem.h"
#include "../FeedForward.h"
#include "Unsupervised.h"
#include "RL/QFunction.h"
/*
* http://www2.econ.iastate.edu/tesfatsi/RLUsersGuide.ICAC2005.pdf
* http://www.autonlab.org/tutorials/rl06.pdf
* http://www.nbu.bg/cogs/events/2000/Readings/Petrov/rltutorial.pdf
*
* http://www.applied-mathematics.net/qlearning/qlearning.html
* http://nn.cs.utexas.edu/downloads/papers/stanley.gecco02_1.pdf
*
* http://stackoverflow.com/questions/740389/good-implementations-of-reinforced-learning
*
* http://stackoverflow.com/questions/10722064/training-a-neural-network-with-reinforcement-learning
*
* http://remi.coulom.free.fr/Thesis/
* http://remi.coulom.free.fr/Publications/Thesis.pdf
*
* http://link.springer.com/article/10.1007/BF00992696
*
* http://scholar.google.cz/scholar?start=10&q=reinforcement+learning+feedforward&hl=en&as_sdt=0,5&as_vis=1
*
*/
namespace Shin
{
namespace NeuronNetwork
{
namespace Learning
{
class QLearning
{
public:
QLearning(size_t input, size_t size, size_t choices);
~QLearning();
QLearning(const QLearning&) =delete;
QLearning& operator=(const QLearning&) =delete;
void learnDelayed(std::vector<std::pair<Solution,Problem>> &p, float quality);
void learnDelayed(std::vector<std::pair<Problem,int>> &p, float quality);
void learn(Solution &s, Problem &p, float quality);
void learn(Problem &p,int action, float quality);
void learnNetwork(double maxError=0.01);
void learningCoeficient(double t);
void initialise(size_t input, size_t size,size_t choices);
int getChoice(Problem &p);
Solution getSolution(Problem &p) {return fun.getSolution(p);}
void setLearningCoeficient(double ok, double err) {fun.setLearningCoeficient(ok,err);};
void opticalBackPropagation() {fun.opticalBackPropagation();};
protected:
RL::QFunctionNetwork fun;
};
}
}
}
#endif

View File

@@ -1 +0,0 @@
./Supervised.h

View File

@@ -1,20 +0,0 @@
#include "./Supervised"
float Shin::NeuronNetwork::Learning::Supervised::calculateError(const Shin::NeuronNetwork::Solution& expectation, const Shin::NeuronNetwork::Solution& solution)
{
register float a=0;
for (size_t i=0;i<expectation.size();i++)
{
a+=pow(expectation[i]-solution[i],2)/2;
}
return a;
}
float Shin::NeuronNetwork::Learning::Supervised::teachSet(const std::vector<std::pair<Shin::NeuronNetwork::Problem,Shin::NeuronNetwork::Solution>> &set)
{
double error=0;
for (register size_t i=0;i<set.size();i++)
{
error+=teach(set[i].first,set[i].second);
}
return error;
}

View File

@@ -1 +0,0 @@
./Unsupervised.h

View File

@@ -1 +0,0 @@
#include "./Unsupervised"

View File

@@ -1,29 +0,0 @@
#ifndef _UNSUPERVISEDLEARNING_H_
#define _UNSUPERVISEDLEARNING_H_
#include <math.h>
#include <cstddef>
#include "../Solution.h"
#include "../FeedForward.h"
namespace Shin
{
namespace NeuronNetwork
{
namespace Learning
{
class Unsupervised
{
public:
Unsupervised(FeedForward &n): network(n) {};
virtual ~Unsupervised() {};
Unsupervised() =delete;
protected:
FeedForward &network;
};
}
}
}
#endif

View File

@@ -6,19 +6,16 @@
#include "IO.h"
namespace Shin
{
namespace NeuronNetwork
{
class Problem : public IO
{
public:
Problem(): IO() {};
Problem(std::vector<float> &p):IO(p) {};
Problem(const std::vector<float> &p):IO(p) {};
Problem(const std::initializer_list<float> &a) : IO(a) {};
protected:
private:
};
}
}
#endif

View File

@@ -5,8 +5,6 @@
#include "IO.h"
namespace Shin
{
namespace NeuronNetwork
{
class Solution : public IO
{
@@ -19,7 +17,6 @@ namespace NeuronNetwork
inline void push_back(const float &a) {data.push_back(a);};
};
}
}
#endif