removed Shin::IO and switched to std::vector

2015-08-31 18:26:19 +02:00
parent f68038a79e
commit e61e616227
16 changed files with 18 additions and 111 deletions
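In short, the commit drops the Shin::IO / Shin::Problem / Shin::Solution wrapper types and has the network API take and return plain std::vector<float>. A minimal sketch of what calling code looks like after this change; the template wrapper, function name, and input values below are illustrative and not part of the repository:

    #include <vector>

    // Before the commit the equivalent call was roughly:
    //   Shin::Solution s = net.solve(Shin::Problem{0.0f, 1.0f});
    // The network type is passed as a template parameter here only to keep
    // the sketch self-contained; LayerNetwork construction is not shown.
    template <typename Network>
    std::vector<float> runOnce(Network& net)
    {
        std::vector<float> input = {0.0f, 1.0f};  // replaces Shin::Problem
        return net.solve(input);                  // now returns std::vector<float>
    }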

src/IO
View File

@@ -1 +0,0 @@
./IO.h

View File

@@ -1,15 +0,0 @@
#include "./IO"
Shin::IO Shin::IO::operator+(const IO &r)
{
Shin::NeuronNetwork::IO tmp;
for(float a:this->data)
{
tmp.data.push_back(a);
}
for(float a:r.data)
{
tmp.data.push_back(a);
}
return tmp;
}
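The deleted operator+ concatenated two IO buffers. Code that relied on it can get the same behaviour from std::vector directly; a hedged sketch (the concat helper below is illustrative, not something this commit adds):

    #include <vector>

    // Concatenate two float buffers, matching what Shin::IO::operator+ did.
    std::vector<float> concat(const std::vector<float>& a, const std::vector<float>& b)
    {
        std::vector<float> out;
        out.reserve(a.size() + b.size());
        out.insert(out.end(), a.begin(), a.end());  // copy left operand
        out.insert(out.end(), b.begin(), b.end());  // append right operand
        return out;
    }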

View File

@@ -1,28 +0,0 @@
#ifndef _NN_IO_H_
#define _NN_IO_H_
#include <vector>
#include <cstddef>
namespace Shin
{
class IO
{
public:
IO() {};
IO(const std::vector<float> &d) : data(d) {}
IO(const IO &old) : data(old.data) {}
IO(const std::initializer_list<float> &a):data(a) { }
virtual ~IO() {};
IO operator+(const IO &r);
inline virtual operator std::vector<float>&() final {return data;}
inline virtual operator std::vector<float>() final {return data;}
virtual float& operator[] (size_t pos) final { return data[pos];}
virtual float operator[] (size_t pos) const final { return data[pos];}
inline virtual size_t size() const final {return data.size();}
protected:
std::vector<float> data = {};
private:
};
}
#endif

View File

@@ -136,7 +136,7 @@ void LayerNetwork::solvePart(float *newSolution, register size_t begin, size_t e
}
}
Shin::Solution LayerNetwork::solve(const Shin::Problem& p)
std::vector<float> LayerNetwork::solve(const std::vector<float>& p)
{
register float* sol=outputs[0];
@@ -175,7 +175,7 @@ Shin::Solution LayerNetwork::solve(const Shin::Problem& p)
prevSize=layerSizes[i];
sol=newSolution;
}
Shin::Solution ret;
std::vector<float> ret;
for(size_t i=1;i<prevSize;i++)
{
ret.push_back(sol[i]);

View File

@@ -120,7 +120,7 @@ namespace NeuralNetwork
virtual size_t size() const { return layers; };
virtual Shin::Solution solve(const Shin::Problem& p) override;
virtual std::vector<float> solve(const std::vector<float>& input) override;
virtual LayerNetworkLayer& operator[](const size_t& l) override;
protected:

View File

@@ -10,7 +10,7 @@ NeuralNetwork::Learning::BackPropagation::~BackPropagation()
delete[] deltas;
}
void NeuralNetwork::Learning::BackPropagation::propagate(const Shin::Solution& expectation)
void NeuralNetwork::Learning::BackPropagation::propagate(const std::vector<float>& expectation)
{
if(deltas==nullptr)
@@ -87,12 +87,12 @@ void NeuralNetwork::Learning::BackPropagation::propagate(const Shin::Solution& e
}
float NeuralNetwork::Learning::BackPropagation::teach(const Shin::Problem& p, const Shin::Solution& solution)
float NeuralNetwork::Learning::BackPropagation::teach(const std::vector<float>& p, const std::vector<float>& solution)
{
Shin::Solution a=network.solve(p);
std::vector<float> a=network.solve(p);
double error=calculateError(solution,a);
Shin::Solution s;
std::vector<float> s;
if(noise)
{
for(size_t i=0;i<solution.size();i++)

View File

@@ -5,7 +5,6 @@
#include <thread>
#include <cstddef>
#include "../../Solution.h"
#include "../LayerNetwork.h"
#include "Learning.h"
@@ -34,8 +33,8 @@ namespace Learning
BackPropagation(const NeuralNetwork::Learning::BackPropagation&) =delete;
BackPropagation operator=(const NeuralNetwork::Learning::BackPropagation&) =delete;
float teach(const Shin::Problem &p,const Shin::Solution &solution);
virtual void propagate(const Shin::Solution& expectation);
float teach(const std::vector<float>&p,const std::vector<float>&solution);
virtual void propagate(const std::vector<float>& expectation);
protected:
LayerNetwork &network;

View File

@@ -1,6 +1,6 @@
#include "Learning.h"
float NeuralNetwork::Learning::Learning::calculateError(const Shin::Solution& expectation, const Shin::Solution& solution)
float NeuralNetwork::Learning::Learning::calculateError(const std::vector<float>& expectation, const std::vector<float>& solution)
{
register float a=0;
for (size_t i=0;i<expectation.size();i++)
@@ -10,7 +10,7 @@ float NeuralNetwork::Learning::Learning::calculateError(const Shin::Solution& ex
return a;
}
float NeuralNetwork::Learning::Learning::teachSet(const std::vector<std::pair<Shin::Problem,Shin::Solution>> &set)
float NeuralNetwork::Learning::Learning::teachSet(const std::vector<std::pair<std::vector<float>,std::vector<float>>> &set)
{
double error=0;
for (register size_t i=0;i<set.size();i++)
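With this change a training set for teachSet is just a vector of (input, expected-output) pairs of float vectors. A sketch of building one, assuming XOR-style example data that is not part of this commit:

    #include <utility>
    #include <vector>

    using Sample   = std::pair<std::vector<float>, std::vector<float>>;
    using TrainSet = std::vector<Sample>;

    // Illustrative training set in the shape teachSet now expects.
    TrainSet makeXorSet()
    {
        return {
            {{0.0f, 0.0f}, {0.0f}},
            {{0.0f, 1.0f}, {1.0f}},
            {{1.0f, 0.0f}, {1.0f}},
            {{1.0f, 1.0f}, {0.0f}},
        };
    }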

View File

@@ -3,7 +3,6 @@
#include <cstddef>
#include "../../Solution.h"
#include "../FeedForward.h"
namespace NeuralNetwork
@@ -26,9 +25,9 @@ namespace Learning
inline virtual void disableNoise() final {noise=0;}
inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; }
float calculateError(const Shin::Solution &expectation,const Shin::Solution &solution);
virtual float teach(const Shin::Problem &p,const Shin::Solution &solution)=0;
virtual float teachSet(const std::vector<std::pair<Shin::Problem,Shin::Solution>> &set) final;
float calculateError(const std::vector<float> &expectation,const std::vector<float> &solution);
virtual float teach(const std::vector<float> &p,const std::vector<float> &solution)=0;
virtual float teachSet(const std::vector<std::pair<std::vector<float>,std::vector<float>>> &set) final;
protected:
float learningCoeficient=LearningCoeficient;

View File

@@ -18,7 +18,7 @@ lib: $(LIBNAME).so $(LIBNAME).a
$(LIBNAME).so: $(OBJFILES)
$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so
$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ../Solution.h ../Problem.h ./ActivationFunction/ActivationFunction.h ./ActivationFunction/Sigmoid.h
$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ./ActivationFunction/ActivationFunction.h ./ActivationFunction/Sigmoid.h
rm -f $(LIBNAME).a # create new library
ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
ranlib $(LIBNAME).a

View File

@@ -1,11 +1,9 @@
#ifndef _S_NN_NN_H_
#define _S_NN_NN_H_
#include <cstdarg>
#include <cstddef>
#include <vector>
#include "../Problem.h"
#include "../Solution.h"
#include "Neuron.h"
namespace NeuralNetwork
@@ -61,7 +59,7 @@ namespace NeuralNetwork
* @param p is a Problem to be solved
* @returns Solution of Network for Problem
*/
virtual Shin::Solution solve(const Shin::Problem&p)=0;
virtual std::vector<float> solve(const std::vector<float>& input)=0;
/**
* @brief Getter of layer

View File

@@ -1,7 +1,7 @@
#ifndef _S_NN_NEURON_H_
#define _S_NN_NEURON_H_
#include <cstdarg>
#include <cstddef>
namespace NeuralNetwork
{

View File

@@ -1 +0,0 @@
./Problem.h

View File

@@ -1,21 +0,0 @@
#ifndef _P_H_
#define _P_H_
#include <cstddef>
#include <vector>
#include "IO.h"
namespace Shin
{
class Problem : public IO
{
public:
Problem(): IO() {};
Problem(const std::vector<float> &p):IO(p) {};
Problem(const std::initializer_list<float> &a) : IO(a) {};
protected:
private:
};
}
#endif

View File

@@ -1 +0,0 @@
./Solution.h

View File

@@ -1,22 +0,0 @@
#ifndef _SOL_H_
#define _SOL_H_
#include "Problem"
#include "IO.h"
namespace Shin
{
class Solution : public IO
{
public:
Solution(): IO() {}
Solution(const Problem& p) :IO(p) {}
Solution(std::vector<float> &solution):IO(solution) {}
Solution(std::vector<float> solution): IO(solution) {}
Solution(const std::initializer_list<float> &a) : IO(a) {};
inline void push_back(const float &a) {data.push_back(a);};
};
}
#endif
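Since the removed Problem and Solution classes mostly forwarded to their underlying vector, plain std::vector<float> covers the same uses (initializer-list construction, indexing, push_back). A small illustrative sketch, not code from the repository:

    #include <vector>

    int main()
    {
        std::vector<float> problem = {0.5f, 0.25f};  // was Shin::Problem{0.5f, 0.25f}
        std::vector<float> solution;                 // was Shin::Solution
        solution.push_back(1.0f);                    // Solution::push_back wrapped data.push_back
        return problem.size() == 2 && solution.size() == 1 ? 0 : 1;
    }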