refactored and recurrent implementation

This commit is contained in:
2016-01-22 13:21:34 +01:00
parent e61e616227
commit d424d87535
65 changed files with 12102 additions and 2361 deletions

View File

@@ -1,8 +1,32 @@
OBJFILES= sse_mathfun.o
include ../Makefile.const
all: $(OBJFILES)
OBJFILES= ./sse_mathfun.o ./NeuralNetwork/Recurrent/Network.o ./NeuralNetwork/Recurrent/Neuron.o
#LayerNetwork.o\
# Learning/Learning.o Learning/BackPropagation.o ../sse_mathfun.o
LINKFILES=
LIBNAME=NeuralNetwork
all: lib
spec:=../include/
%.o : %.cpp $(patsubst ./%.o,../include/%.h,$<)
$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -I../include -c $< -o $@
lib: $(LIBNAME).so $(LIBNAME).a
$(LIBNAME).so: $(OBJFILES)
$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so
$(LIBNAME).a: $(OBJFILES)
rm -f $(LIBNAME).a # create new library
ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
ranlib $(LIBNAME).a
nm --demangle $(LIBNAME).a > $(LIBNAME).nm
clean:
@rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o
@rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o

View File

@@ -1,19 +0,0 @@
#ifndef __TRAN_FUN_H_
#define __TRAN_FUN_H_
#include <math.h>
namespace NeuralNetwork
{
namespace ActivationFunction
{
class ActivationFunction
{
public:
virtual ~ActivationFunction() {}
virtual float derivatedOutput(const float &input,const float &output)=0;
virtual float operator()(const float &x)=0;
};
}
}
#endif

View File

@@ -1,22 +0,0 @@
#ifndef __TRAN_HEAVISIDE_H_
#define __TRAN_HEAVISIDE_H_
#include "./ActivationFunction.h"
namespace NeuralNetwork
{
namespace ActivationFunction
{
class Heaviside: public ActivationFunction
{
public:
Heaviside(const float &lambdaP): lambda(lambdaP) {}
inline virtual float derivatedOutput(const float &input,const float &output) override { return 1.0; }
inline virtual float operator()(const float &x) override { return x>lambda ? 1.0f : 0.0f; };
protected:
float lambda;
};
}
}
#endif

View File

@@ -1,21 +0,0 @@
#ifndef __TRAN_HYPTAN_H_
#define __TRAN_HYPTAN_H_
#include "./ActivationFunction.h"
namespace NeuralNetwork
{
namespace ActivationFunction
{
class HyperbolicTangent: public ActivationFunction
{
public:
HyperbolicTangent(const float& lam=1):lambda(lam) {}
inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*(1-output*output); }
inline virtual float operator()(const float &x) override { return tanh(lambda*x); };
protected:
float lambda;
};
}
}
#endif

View File

@@ -1,30 +0,0 @@
#ifndef __TRAN_SIGMOID_H_
#define __TRAN_SIGMOID_H_
#include "./StreamingActivationFunction.h"
namespace NeuralNetwork
{
namespace ActivationFunction
{
class Sigmoid: public StreamingActivationFunction
{
public:
Sigmoid(const float lambdaP = 0.8): lambda(lambdaP) {}
inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*output*(1.0f-output); }
inline virtual float operator()(const float &x) override { return 1.0f / (1.0f +exp(-lambda*x) ); };
inline virtual __m128 operator()(__m128 x) override {
x=_mm_mul_ps(temporaryConstLambda,x); //-lambda*sol[k]
x=exp_ps(x); //exp(x)
x= _mm_add_ps(x,temporaryConst1); //1+exp()
x= _mm_div_ps(temporaryConst1,x);//1/....
return x;
}
protected:
float lambda;
__m128 temporaryConst1=_mm_set1_ps(1.0);
__m128 temporaryConstLambda=_mm_set1_ps(-lambda);
};
}
}
#endif
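For reference, a minimal standalone sketch of what the SSE path above computes: the same 1/(1+exp(-lambda*x)) as the scalar operator(), evaluated for four lanes at once. It assumes the project's sse_mathfun.h and its exp_ps(); the include path is an assumption.
#include <cstdio>
#include <cmath>
#include <xmmintrin.h>
#include "sse_mathfun.h" // exp_ps(); path is an assumption
int main() {
    const float lambda = 0.8f;
    alignas(16) float in[4] = {-2.0f, -0.5f, 0.5f, 2.0f};
    alignas(16) float out[4];
    __m128 x = _mm_load_ps(in);
    x = _mm_mul_ps(_mm_set1_ps(-lambda), x);   // -lambda*x
    x = exp_ps(x);                             // exp(-lambda*x)
    x = _mm_add_ps(x, _mm_set1_ps(1.0f));      // 1 + exp(-lambda*x)
    x = _mm_div_ps(_mm_set1_ps(1.0f), x);      // 1 / (1 + exp(-lambda*x))
    _mm_store_ps(out, x);
    for (int i = 0; i < 4; ++i)                // compare against the scalar formula
        std::printf("%f vs %f\n", out[i], 1.0f / (1.0f + std::exp(-lambda * in[i])));
}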

View File

@@ -1,23 +0,0 @@
#ifndef __STREAMINGTRAN_FUN_H_
#define __STREAMINGTRAN_FUN_H_
#include <xmmintrin.h>
#include "../../sse_mathfun.h"
#include "./ActivationFunction.h"
namespace NeuralNetwork
{
namespace ActivationFunction
{
class StreamingActivationFunction : public ActivationFunction
{
public:
virtual float derivatedOutput(const float &input,const float &output)=0;
virtual float operator()(const float &x)=0;
virtual __m128 operator()(__m128)=0; // it must be overriden to be used!
};
}
}
#endif

View File

@@ -1,18 +0,0 @@
#ifndef _BASIS_FUN_H_
#define _BASIS_FUN_H_
#include <math.h>
namespace NeuralNetwork
{
namespace BasisFunction
{
class BasisFunction
{
public:
virtual ~BasisFunction() {}
virtual float operator()(const size_t &inputSize, const float* weights, const float* input)=0;
};
}
}
#endif

View File

@@ -1,63 +0,0 @@
#ifndef __BASIS_FEEDFORWARD_H_
#define __BASIS_FEEDFORWARD_H_
#include "./StreamingBasisFunction.h"
#include <mmintrin.h>
#include <xmmintrin.h>
#include <emmintrin.h>
#include <xmmintrin.h>
#include <pmmintrin.h>
namespace NeuralNetwork
{
namespace BasisFunction
{
class FeedForward: public StreamingBasisFunction
{
public:
FeedForward() {}
inline virtual __m128 operator()(const size_t& inputSize, const float* weights, const float* input, const size_t& alignedPrev)
{
__m128 partialSolution= _mm_setzero_ps();
__m128 w=_mm_setzero_ps();
__m128 sols;
for(register size_t k=alignedPrev;k<inputSize;k++)
{
w = _mm_load_ss(weights+k);
sols = _mm_load_ss(input+k);
w=_mm_mul_ps(w,sols);
partialSolution=_mm_add_ps(partialSolution,w);
}
for(register size_t k=0;k<alignedPrev;k+=sizeof(float)) // TODO ??? sizeof(float)
{
w = _mm_load_ps(weights+k);
sols = _mm_load_ps(input+k);
w=_mm_mul_ps(w,sols);
partialSolution=_mm_add_ps(partialSolution,w);
}
#ifdef USE_SSE2 //pre-SSE3 solution
partialSolution= _mm_add_ps(_mm_movehl_ps(partialSolution, partialSolution), partialSolution);
partialSolution=_mm_add_ss(partialSolution, _mm_shuffle_ps(partialSolution,partialSolution, 1));
#else
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
#endif
return partialSolution;
}
inline virtual float operator()(const size_t &inputSize, const float* weights, const float* input)
{
register float tmp = 0;
for(register size_t k=0;k<inputSize;k++)
{
tmp+=input[k]*weights[k];
}
return tmp;
}
};
}
}
#endif
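A small sketch of the horizontal reduction used above: after the vector loop, each of the four SSE lanes holds a partial sum of the dot product, and both reductions below collapse them into lane 0. The SSE3 _mm_hadd_ps path and the pre-SSE3 movehl/shuffle fallback give the same result.
#include <cstdio>
#include <xmmintrin.h>
#include <pmmintrin.h> // _mm_hadd_ps (SSE3)
int main() {
    __m128 partial = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); // lanes hold 1, 2, 3, 4
    // SSE3 path: two horizontal adds
    __m128 a = _mm_hadd_ps(partial, partial);
    a = _mm_hadd_ps(a, a);
    // pre-SSE3 path: fold the high half onto the low half, then add the two remaining lanes
    __m128 b = _mm_add_ps(_mm_movehl_ps(partial, partial), partial);
    b = _mm_add_ss(b, _mm_shuffle_ps(b, b, 1));
    std::printf("%f %f\n", _mm_cvtss_f32(a), _mm_cvtss_f32(b)); // both print 10
}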

View File

@@ -1,17 +0,0 @@
#ifndef __BASIS_RADIAL_H_
#define __BASIS_RADIAL_H_
#include "./BasisFunction.h"
namespace NeuralNetwork
{
namespace BasisFunction
{
class Radial: public BasisFunction
{
public:
Radial() {}
};
}
}
#endif

View File

@@ -1,22 +0,0 @@
#ifndef __STREAMINGBASIS_FUN_H_
#define __STREAMINGBASIS_FUN_H_
#include <xmmintrin.h>
#include "../../sse_mathfun.h"
#include "./BasisFunction.h"
namespace NeuralNetwork
{
namespace BasisFunction
{
class StreamingBasisFunction : public BasisFunction
{
public:
virtual float operator()(const size_t &inputSize, const float* weights, const float* input) = 0;
virtual __m128 operator()(const size_t& inputSize, const float* weights, const float* input, const size_t& alignedPrev) =0;
};
}
}
#endif

View File

@@ -1,18 +0,0 @@
#ifndef _S_NN_FF_H_
#define _S_NN_FF_H_
#include "LayerNetwork.h"
namespace NeuralNetwork
{
class FeedForward : public LayerNetwork
{
public:
FeedForward(std::initializer_list<size_t> s, double lam=NeuralNetwork::lambda,
LayerNetworkInitializer weightInit=
[](const size_t&, const size_t &, const size_t &)->float{ return 1.0-((float)(rand()%2001))/1000.0;}
) : LayerNetwork(s,lam,weightInit) {}
};
}
#endif

View File

@@ -1,201 +0,0 @@
#include "LayerNetwork.h"
using namespace NeuralNetwork;
LayerNetworkLayer::~LayerNetworkLayer()
{
if(neurons!=nullptr)
{
for(size_t i=0;i<layerSize-1;i++)
{
delete neurons[i];
}
delete[] neurons;
}
}
LayerNetworkNeuron& LayerNetworkLayer::operator[](const size_t& neuron)
{
if(neurons==nullptr)
{
neurons=new LayerNetworkNeuron*[layerSize-1];
for(size_t i=1;i<layerSize;i++)
{
neurons[i-1]=new LayerNetworkNeuron(weights[i],outputs[i],inputs[i],lambda,function);
}
}
if(neuron>=layerSize)
throw std::out_of_range("Not so many neurons in layers.");
return *neurons[neuron];
}
LayerNetwork::LayerNetwork(std::initializer_list<size_t> s, double lam, LayerNetworkInitializer weightInit): Network(),layers(s.size())
{
transfer = new ActivationFunction::ActivationFunction*[s.size()];
weights= new float**[s.size()];
layerSizes= new size_t[s.size()];
outputs= new float*[s.size()];
inputs= new float*[s.size()];
register int i=0;
register int prev_size=1;
for(int layeSize:s) // TODO rename
{
transfer[i]= new ActivationFunction::Sigmoid(lam);
layeSize+=1;
if(i==0)
{
prev_size=layeSize;
}
layerSizes[i]=layeSize;
weights[i]= new float*[layeSize];
outputs[i]= new float[layeSize];
inputs[i]= new float[layeSize];
outputs[i][0]=1.0;
for (int j=1;j<layeSize;j++)
{
weights[i][j]= new float[prev_size];
for(int k=0;k<prev_size;k++)
{
weights[i][j][k]=weightInit(i,j,k);
}
}
i++;
prev_size=layeSize;
}
}
LayerNetwork::~LayerNetwork()
{
if(weights != nullptr)
{
for(size_t i=0;i<layers;i++)
{
for (size_t j=1;j<layerSizes[i];j++)
{
delete[] weights[i][j];
}
delete[] weights[i];
delete[] outputs[i];
delete[] inputs[i];
delete transfer[i];
}
delete[] weights;
delete[] layerSizes;
delete[] outputs;
delete[] inputs;
delete[] transfer;
}
if(ffLayers !=nullptr)
{
for(size_t i=0;i<layers;i++)
{
delete ffLayers[i];
}
delete[] ffLayers;
}
delete basisFunction;
}
void LayerNetwork::solvePart(float *newSolution, register size_t begin, size_t end,size_t prevSize, float *sol,size_t layer)
{
ActivationFunction::StreamingActivationFunction *function=dynamic_cast<ActivationFunction::StreamingActivationFunction*>(transfer[layer]);
BasisFunction::StreamingBasisFunction *bFunc=dynamic_cast<BasisFunction::StreamingBasisFunction*>(basisFunction);
size_t alignedPrev=prevSize>16?(prevSize-(prevSize%16)):0;
__m128 partialSolution;
if(prevSize >=4 && function !=nullptr && bFunc != nullptr)
{
for( size_t j=begin;j<end;j++)
{
partialSolution=bFunc->operator()(prevSize,weights[layer][j],sol,alignedPrev);
_mm_store_ss(inputs[layer]+j,partialSolution);
partialSolution=function->operator()(partialSolution);
_mm_store_ss(newSolution+j,partialSolution);
}
}else
{
for( size_t j=begin;j<end;j++)
{
if (bFunc !=nullptr && prevSize >=4)
{
partialSolution=bFunc->operator()(prevSize,weights[layer][j],sol,alignedPrev);
_mm_store_ss(inputs[layer]+j,partialSolution);
newSolution[j]=transfer[layer]->operator()(inputs[layer][j]);
}else
{
const float tmp=basisFunction->operator()(prevSize,weights[layer][j],sol);
inputs[layer][j]=tmp;
newSolution[j]=transfer[layer]->operator()(tmp);
}
}
}
}
std::vector<float> LayerNetwork::solve(const std::vector<float>& p)
{
register float* sol=outputs[0];
if(p.size()+1 != layerSizes[0])
{
throw std::out_of_range("Wrong number of inputs");
}
sol[0]=1;
for(size_t i=0;i<p.size();i++)
sol[i+1]=p[i];
register size_t prevSize=layerSizes[0];
for(register size_t i=1;i<layers;i++)
{
float* newSolution= outputs[i];
if(threads > 1 && (layerSizes[i] > 700 || prevSize > 700)) // 700 is a rough guess at the layer size where spawning threads starts to pay off
{
std::vector<std::thread> th;
size_t s=1;
register size_t step =layerSizes[i]/threads;
for(size_t t=1;t<threads;t++)
{
th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{
solvePart(newSolution,from,to,prevSize,sol,i);
},s,s+step));
s+=step;
}
solvePart(newSolution,s,layerSizes[i],prevSize,sol,i);
for (auto& thr : th)
thr.join();
}else
{
solvePart(newSolution,1,layerSizes[i],prevSize,sol,i);
}
prevSize=layerSizes[i];
sol=newSolution;
}
std::vector<float> ret;
for(size_t i=1;i<prevSize;i++)
{
ret.push_back(sol[i]);
}
return ret;
}
LayerNetworkLayer& LayerNetwork::operator[](const size_t& l)
{
if(ffLayers==nullptr)
{
ffLayers=new LayerNetworkLayer*[layers];
for(size_t i=0;i<layers;i++)
{
ffLayers[i]=new LayerNetworkLayer(layerSizes[i],weights[i],outputs[i],inputs[i],lambda,*transfer[i]);
}
}
if(l>=layers)
throw std::out_of_range("Not so many layers in network.");
return *ffLayers[l];
}

View File

@@ -1,140 +0,0 @@
#ifndef _NN_LN_H_
#define _NN_LN_H_
#include "Network.h"
#include "ActivationFunction/Sigmoid.h"
#include "BasisFunction/FeedForward.h"
#include <vector>
#include <initializer_list>
#include <thread>
#include <pthread.h>
#include <iostream>
#include <math.h>
#include <mmintrin.h>
#include <xmmintrin.h>
#include <emmintrin.h>
#include <xmmintrin.h>
#include <pmmintrin.h>
#include "../sse_mathfun.h"
namespace NeuralNetwork
{
class LayerNetworkNeuron : public Neuron
{
public:
inline LayerNetworkNeuron(float *w, float &outputF, float &i,float lam,ActivationFunction::ActivationFunction &fun):function(fun),weights(w),out(outputF),inputs(i),lambda(lam) { }
LayerNetworkNeuron() = delete;
LayerNetworkNeuron(const LayerNetworkNeuron&) = delete;
LayerNetworkNeuron& operator=(const LayerNetworkNeuron&) = delete;
inline virtual float getWeight(const int& i ) const override { return weights[i+1]; }
inline virtual void setWeight(const int& i,const float &p) override { weights[i+1]=p; }
inline virtual float output() const override { return out; }
inline virtual float input() const override { return inputs; }
inline virtual float derivatedOutput() const override { return function.derivatedOutput(inputs,out); }
inline virtual float getBias() const override { return weights[0]; }
inline virtual void setBias(const float & bias) override { weights[0]=bias; }
protected:
ActivationFunction::ActivationFunction &function;
float *weights;
float &out;
float &inputs;
float lambda;
private:
};
class LayerNetworkLayer: public Layer
{
public:
inline LayerNetworkLayer(size_t s, float **w,float *out,float *in,float lam,ActivationFunction::ActivationFunction &fun): function(fun), layerSize(s),weights(w),outputs(out),inputs(in),lambda(lam) {}
~LayerNetworkLayer();
LayerNetworkLayer(const LayerNetworkLayer &) = delete;
LayerNetworkLayer& operator=(const LayerNetworkLayer &) = delete;
virtual LayerNetworkNeuron& operator[](const size_t& neuron) override;
inline virtual size_t size() const override {return layerSize-1;};
protected:
ActivationFunction::ActivationFunction &function;
LayerNetworkNeuron **neurons=nullptr;
size_t layerSize;
float **weights;
float *outputs;
float *inputs;
float lambda;
};
/**
* @brief Typedef for the LayerNetwork weight-initialization function
*/
typedef std::function<float(const size_t&layer, const size_t &neuron, const size_t &weight)> LayerNetworkInitializer;
/**
* @author Tomas Cernik (Tom.Cernik@gmail.com)
* @brief Class representing LayerNetwork network
* @see ACyclicNetwork
*
* @b Usage:
* @code
* Shin::NeuralNetwork::LayerNetwork net({1,5,2});
* net.setThreads(2); // it allows the network to use 2 threads if it needs to
* Shin::Solution sol = net.solve(Shin::Problem(0.1)); // and finally, solve the Problem
* @endcode
*/
class LayerNetwork:public Network
{
public:
/**
* @brief Constructor for LayerNetwork
* @param s is the initializer for the layers (their sizes)
* @param lam is the parameter for the TransferFunction
* @param weightInit is weight initializer function
*/
LayerNetwork(std::initializer_list<size_t> s, double lam=NeuralNetwork::lambda,
LayerNetworkInitializer weightInit=
[](const size_t&, const size_t &, const size_t &)->float{ return 1.0-((float)(rand()%2001))/1000.0;}
);
virtual ~LayerNetwork();
/**
* @brief we don't want to allow network to be copied
*/
LayerNetwork(const LayerNetwork &f) = delete; //TODO
/**
* @brief we don't want to allow network to be assigned
*/
LayerNetwork operator=(const LayerNetwork &f)=delete;
/**
* @brief Returns the number of layers
*/
virtual size_t size() const { return layers; };
/**
* @brief Computes the output Solution from the input Problem
*/
virtual std::vector<float> solve(const std::vector<float>& input) override;
virtual LayerNetworkLayer& operator[](const size_t& l) override;
protected:
void solvePart(float *newSolution, size_t begin, size_t end,size_t prevSize, float* sol,size_t layer);
private:
LayerNetworkLayer **ffLayers=nullptr;
float ***weights=nullptr;
float **outputs=nullptr;
float **inputs=nullptr;
ActivationFunction::ActivationFunction **transfer=nullptr;
BasisFunction::BasisFunction *basisFunction = new BasisFunction::FeedForward();
size_t *layerSizes=nullptr;
size_t layers;/**< Number of layers */
};
}
#endif
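To make the removed API concrete, here is a minimal training sketch against the pre-refactor classes declared above (LayerNetwork and Learning::BackPropagation); the include paths and the XOR data set are assumptions for illustration only.
#include <cstdio>
#include <utility>
#include <vector>
#include "NeuralNetwork/LayerNetwork.h"               // path is an assumption
#include "NeuralNetwork/Learning/BackPropagation.h"   // path is an assumption
int main() {
    NeuralNetwork::LayerNetwork net({2, 3, 1});       // 2 inputs, 3 hidden, 1 output
    NeuralNetwork::Learning::BackPropagation bp(net);
    bp.setLearningCoeficient(0.4f);
    std::vector<std::pair<std::vector<float>, std::vector<float>>> xorSet = {
        {{0, 0}, {0}}, {{0, 1}, {1}}, {{1, 0}, {1}}, {{1, 1}, {0}}};
    for (int epoch = 0; epoch < 10000; ++epoch)
        bp.teachSet(xorSet);                          // returns the summed squared error
    for (auto& sample : xorSet)                       // print the learned outputs
        std::printf("%g\n", net.solve(sample.first)[0]);
}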

View File

@@ -1 +0,0 @@
./BackPropagation.h

View File

@@ -1,109 +0,0 @@
#include "./BackPropagation"
NeuralNetwork::Learning::BackPropagation::~BackPropagation()
{
if(deltas!=nullptr)
{
for(size_t i=0;i<network.size();i++)
delete[] deltas[i];
}
delete[] deltas;
}
void NeuralNetwork::Learning::BackPropagation::propagate(const std::vector<float>& expectation)
{
if(deltas==nullptr)
{
deltas=new float*[network.size()];
for(size_t i=0;i<network.size();i++)
{
deltas[i]=new float[network[i].size()];
deltas[i][0]=0.0;
}
}
for(size_t j=1;j<network[network.size()-1].size();j++)
{
deltas[network.size()-1][j]= correction(expectation[j-1],network[network.size()-1][j].output())
*network[network.size()-1][j].derivatedOutput();
}
for(int i=(int)network.size()-2;i>0;i--)
{
if(allowThreads)
{
std::vector<std::thread> th;
size_t s=0;
//TODO THIS IS NOT WORKING!!!
#define THREADS 4
int step =network[i].size()/THREADS;
for(int t=1;t<=THREADS;t++)
{
if(s>=network[i].size())
break;
th.push_back(std::thread([&i,this](size_t from, size_t to)->void{
for(size_t j=from;j<to;j++)
{
register float deltasWeight = 0;
for(size_t k=1;k<this->network[i+1].size();k++)
{
deltasWeight+=deltas[i+1][k]*this->network[i+1][k].getWeight(j);
}
//deltas[i][j]*=this->network[i]->operator[](j)->derivatedOutput(); // WHY THE HELL IS SEQ here??
}
},s,t==THREADS?network[i].size():s+step));//{}
s+=step;
}
for (auto& thr : th)
thr.join();
}else
{
for(size_t j=0;j<network[i].size();j++)
{
register float deltasWeight = 0;
for(size_t k=1;k<this->network[i+1].size();k++)
{
deltasWeight+=deltas[i+1][k]*this->network[i+1][k].getWeight(j);
}
deltas[i][j]=deltasWeight*this->network[i][j].derivatedOutput();
}
}
}
for(size_t i=1;i<network.size();i++)
{
size_t max=network[i-1].size();
for(size_t j=1;j<network[i].size();j++)
{
network[i][j].setWeight(0,network[i][j].getWeight(0)+deltas[i][j]*learningCoeficient);
for(size_t k=1;k<max;k++)
{
network[i][j].setWeight(k, network[i][j].getWeight(k)+learningCoeficient*deltas[i][j]*network[i-1][k].output());
}
}
}
}
float NeuralNetwork::Learning::BackPropagation::teach(const std::vector<float>& p, const std::vector<float>& solution)
{
std::vector<float> a=network.solve(p);
double error=calculateError(solution,a);
std::vector<float> s;
if(noise)
{
for(size_t i=0;i<solution.size();i++)
{
s.push_back(solution[i]*((double)((100000-noiseSize)+(rand()%(noiseSize*2+1)))/100000.0));
}
propagate(s);
}else
{
propagate(solution);
}
return error;
}
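For reference, propagate() above implements the standard delta rule; with learning rate \eta (learningCoeficient), target t, output o, and activation derivative f':
\delta_j^{(L)} = (t_j - o_j)\, f'(\mathrm{net}_j^{(L)})
\delta_j^{(l)} = \Big(\sum_k \delta_k^{(l+1)} w_{kj}^{(l+1)}\Big)\, f'(\mathrm{net}_j^{(l)})
\Delta w_{ji}^{(l)} = \eta\, \delta_j^{(l)}\, o_i^{(l-1)}, \qquad \Delta b_j^{(l)} = \eta\, \delta_j^{(l)}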

View File

@@ -1,47 +0,0 @@
#ifndef _BACK_PROPAGATION_H_
#define _BACK_PROPAGATION_H_
#include <math.h>
#include <thread>
#include <cstddef>
#include "../LayerNetwork.h"
#include "Learning.h"
/*
* http://sydney.edu.au/engineering/it/~comp4302/ann4-3s.pdf
* http://www.cs.cmu.edu/afs/cs/academic/class/15883-f13/slides/backprop.pdf
* http://airccse.org/journal/jcsit/0211ijcsit08.pdf
* http://www.cedar.buffalo.edu/~srihari/CSE574/Chap5/Chap5.3-BackProp.pdf
* http://stackoverflow.com/questions/13095938/can-somebody-please-explain-the-backpropagation-algorithm-to-me
* http://ufldl.stanford.edu/wiki/index.php/Backpropagation_Algorithm
*
* http://www.cleveralgorithms.com/nature-inspired/neural/backpropagation.html
*
*/
namespace NeuralNetwork
{
namespace Learning
{
class BackPropagation : public Learning
{
public:
BackPropagation(LayerNetwork &n): Learning(), network(n) {}
virtual ~BackPropagation();
BackPropagation(const NeuralNetwork::Learning::BackPropagation&) =delete;
BackPropagation operator=(const NeuralNetwork::Learning::BackPropagation&) =delete;
float teach(const std::vector<float>&p,const std::vector<float>&solution);
virtual void propagate(const std::vector<float>& expectation);
protected:
LayerNetwork &network;
inline virtual float correction(const float& expected, const float& computed) { return expected - computed;};
float **deltas=nullptr;
};
}
}
#endif

View File

@@ -1,21 +0,0 @@
#include "Learning.h"
float NeuralNetwork::Learning::Learning::calculateError(const std::vector<float>& expectation, const std::vector<float>& solution)
{
register float a=0;
for (size_t i=0;i<expectation.size();i++)
{
a+=pow(expectation[i]-solution[i],2)/2;
}
return a;
}
float NeuralNetwork::Learning::Learning::teachSet(const std::vector<std::pair<std::vector<float>,std::vector<float>>> &set)
{
double error=0;
for (register size_t i=0;i<set.size();i++)
{
error+=teach(set[i].first,set[i].second);
}
return error;
}
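calculateError() above is the usual half sum of squared errors over the output vector:
E = \frac{1}{2} \sum_i (t_i - o_i)^2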

View File

@@ -1,40 +0,0 @@
#ifndef _S_NN_LEARNING_H_
#define _S_NN_LEARNING_H_
#include <cstddef>
#include "../FeedForward.h"
namespace NeuralNetwork
{
namespace Learning
{
const float LearningCoeficient=0.4;
class Learning
{
public:
Learning() {};
inline virtual ~Learning() {};
inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; };
inline virtual void allowThreading() final {allowThreads=1;}
inline virtual void disableThreading() final {allowThreads=0;}
inline virtual void allowNoise() final {noise=1;}
inline virtual void disableNoise() final {noise=0;}
inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; }
float calculateError(const std::vector<float> &expectation,const std::vector<float> &solution);
virtual float teach(const std::vector<float> &p,const std::vector<float> &solution)=0;
virtual float teachSet(const std::vector<std::pair<std::vector<float>,std::vector<float>>> &set) final;
protected:
float learningCoeficient=LearningCoeficient;
bool allowThreads=0;
bool noise=0;
unsigned noiseSize=500;
};
}
}
#endif

View File

@@ -1 +0,0 @@
./OpticalBackPropagation.h

View File

@@ -1,28 +0,0 @@
#ifndef _OPT_BACK_PROPAGATION_H_
#define _OPT_BACK_PROPAGATION_H_
#include "BackPropagation.h"
/*
* http://proceedings.informingscience.org/InSITE2005/P106Otai.pdf
*/
namespace NeuralNetwork
{
namespace Learning
{
class OpticalBackPropagation : public BackPropagation
{
public:
inline OpticalBackPropagation(LayerNetwork &n): BackPropagation(n) {}
protected:
virtual float correction(const float& expected, const float& computed) override
{
register float tmp=(expected-computed);
register float ret=1+exp(tmp*tmp);
return tmp < 0? -ret:ret;
};
};
}
}
#endif
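The correction() override above replaces the plain error term of BackPropagation with the optical back propagation form:
\mathrm{correction}(t, o) = \operatorname{sgn}(t - o)\,\bigl(1 + e^{(t - o)^2}\bigr)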

View File

@@ -1,28 +0,0 @@
OBJFILES=\
LayerNetwork.o\
Learning/Learning.o Learning/BackPropagation.o ../sse_mathfun.o
LINKFILES=
LIBNAME=NeuralNetwork
include ../../Makefile.const
all: lib
../sse_mathfun.o: ../sse_mathfun.cpp ../sse_mathfun.h
make -C ../
lib: $(LIBNAME).so $(LIBNAME).a
$(LIBNAME).so: $(OBJFILES)
$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so
$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ./ActivationFunction/ActivationFunction.h ./ActivationFunction/Sigmoid.h
rm -f $(LIBNAME).a # create new library
ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
ranlib $(LIBNAME).a
nm --demangle $(LIBNAME).a > $(LIBNAME).nm
clean:
@rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o

View File

@@ -1,93 +0,0 @@
#ifndef _S_NN_NN_H_
#define _S_NN_NN_H_
#include <cstddef>
#include <vector>
#include "Neuron.h"
namespace NeuralNetwork
{
/**
* @brief Default value for lambda
*/
const float lambda=0.8;
/**
* @author Tomas Cernik (Tom.Cernik@gmail.com)
* @brief Abstract class for all Layers of neurons
*/
class Layer
{
public:
virtual ~Layer() {};
/**
* @brief This is a virtual function for selecting neuron
* @param neuron is position in layer
* @returns Specific neuron
*/
virtual Neuron& operator[](const size_t& neuron)=0;
/**
* @returns Size of layer
*/
virtual size_t size() const=0;
};
/**
* @author Tomas Cernik (Tom.Cernik@gmail.com)
* @brief Abstract model of simple Network
*/
class Network
{
public:
/**
* @brief Constructor for Network
*/
inline Network() {};
/**
* @brief Virtual destructor for Network
*/
virtual ~Network() {};
/**
* @brief This is a virtual function for all networks
* @param p is a Problem to be solved
* @returns Solution of Network for Problem
*/
virtual std::vector<float> solve(const std::vector<float>& input)=0;
/**
* @brief Getter of layer
* @param layer is the position of the layer
* @returns the specified layer
*/
virtual Layer& operator[](const size_t &layer)=0;
/**
* @brief Returns the parameter for TransferFunctions
* @returns lambda (the parameter for TransferFunctions)
*/
inline float getLambda() const {return lambda;}
/**
* @param t is the number of threads; if set to 0 or 1, threading is disabled
* @brief Enables or disables threaded computation of the ANN
*/
inline virtual void setThreads(const unsigned&t) final {threads=t;}
protected:
/**
* @brief Number of threads used by network
*/
unsigned threads=1;
};
}
#endif

View File

@@ -1,57 +0,0 @@
#ifndef _S_NN_NEURON_H_
#define _S_NN_NEURON_H_
#include <cstddef>
namespace NeuralNetwork
{
/**
* @author Tomas Cernik (Tom.Cernik@gmail.com)
* @brief Abstract class of neuron. All Neuron classes should derive from this one
*/
class Neuron
{
public:
/**
* @brief virtual destructor for Neuron
*/
virtual ~Neuron() {};
virtual float getWeight(const int &w) const =0;
/**
* @brief Sets weight
* @param i is the index of the input neuron
* @param p is the new weight for input neuron i
*/
virtual void setWeight(const int& i ,const float &p) =0;
/**
* @brief Returns output of neuron
*/
virtual float output() const =0;
/**
* @brief Returns input of neuron
*/
virtual float input() const=0;
/**
* @brief Returns value for derivation of activation function
*/
virtual float derivatedOutput() const=0;
/**
* @brief Function sets bias for neuron
* @param bias is the new bias (initial value for the neuron)
*/
virtual void setBias(const float &bias)=0;
/**
* @brief Function returns bias for neuron
*/
virtual float getBias() const=0;
protected:
};
}
#endif

View File

@@ -1,33 +0,0 @@
#ifndef _S_NN_PERCEP_H_
#define _S_NN_PERCEP_H_
#include "./FeedForward"
#include "TransferFunction/Heaviside.h"
namespace NeuralNetwork
{
/**
* @author Tomas Cernik (Tom.Cernik@gmail.com)
* @brief Class representing a Perceptron - a network with only 2 layers (input and output) and a Heaviside transfer function
*/
class Perceptron:public FeedForward
{
public:
/**
* @brief Constructor for Perceptron network
* @param inputSize size of input Problem
* @param outputSize size of output Solution
*/
Perceptron(const size_t &inputSize, const size_t &outputSize):FeedForward({inputSize,outputSize})
{
// iterate through the layers and set them to the Heaviside function
for(int i=0;i<layers;i++)
{
delete transfer[i];
transfer[i]= new TransferFunction::Heaviside(0.5);
}
};
};
}
#endif

View File

@@ -0,0 +1,50 @@
#include <NeuralNetwork/Recurrent/Network.h>
std::vector<float> NeuralNetwork::Recurrent::Network::computeOutput(const std::vector<float>& input, unsigned int iterations) {
//TODO: check inputSize
size_t neuronSize=neurons.size();
std::vector<float> outputs(neuronSize);
for(size_t i=0;i<inputSize;i++) {
outputs[i]=input[i];
}
for(unsigned int iter=0;iter< iterations;iter++) {
for(size_t i=inputSize;i<neuronSize;i++) {
outputs[i]=neurons[i].output();
}
// update neurons
for(size_t i=inputSize;i<neuronSize;i++) {
neurons[i](outputs);
}
}
std::vector<float> ret;
for(size_t i=0;i<outputSize;i++) {
ret.push_back(neurons[i+inputSize].output());
}
return ret;
}
void NeuralNetwork::Recurrent::Network::stringify(std::ostream& out) const {
out <<std::setprecision(std::numeric_limits<float>::digits10+1);
out << "{\n";
out << "\t\"class\":\"NeuralNetwork::Recurrent::Network\",\n";
out << "\t\"size\":" << neurons.size() << ",\n";
out << "\t\"inputs\":" << inputSize << ",\n";
out << "\t\"outputs\":" << outputSize << ",\n";
out << "\t\"neurons\":[";
for(size_t i=0;i<neurons.size();i++) {
if(i!=0)
out << ",\n";
out << neurons[i].stringify("\t\t");
}
out << "\n\t]\n";
out <<"}";
}

View File

@@ -0,0 +1,25 @@
#include <NeuralNetwork/Recurrent/Neuron.h>
std::string NeuralNetwork::Recurrent::Neuron::stringify(const std::string &prefix) const {
std::ostringstream out;
out.precision(std::numeric_limits<float>::digits10+1);
out <<std::setprecision(std::numeric_limits<float>::digits10+1);
out << prefix << "{\n";
out << prefix << "\t\"class\": \"NeuralNetwork::Recurrent::Neuron\",\n";
out << prefix << "\t\"id\": " << id() << ",\n";
out << prefix << "\t\"bias\": " << getBias() << ",\n";
out << prefix << "\t\"output\": " << output() << ",\n";
out << prefix << "\t\"value\": " << value() << ",\n";
out << prefix << "\t\"activationFunction\": " << activation->stringify() <<",\n";
out << prefix << "\t\"basisFunction\": " << basis->stringify() <<",\n";
out << prefix << "\t\"weights\": [";
for(size_t j=0;j<weights.size();j++) {
if(j!=0)
out << ", ";
out << weights[j];
}
out << "]\n";
out << prefix << "}";
return out.str();
}
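With the format produced by the two stringify() methods above, a serialized neuron looks roughly like this (all values are illustrative and the nested activation/basis objects are elided):
{
    "class": "NeuralNetwork::Recurrent::Neuron",
    "id": 3,
    "bias": 0.25,
    "output": 0.6224593,
    "value": 0.5,
    "activationFunction": { ... },
    "basisFunction": { ... },
    "weights": [0.1, -0.4, 0.7]
}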

src/sse_mathfun.as (new file, 11091 lines)

File diff suppressed because it is too large.

View File

@@ -1,4 +1,4 @@
#include "./sse_mathfun.h"
#include "../include/sse_mathfun.h"
#include <xmmintrin.h>
@@ -174,12 +174,10 @@ v4sf exp_ps(v4sf x) {
#endif
v4sf one = *(v4sf*)_ps_1;
x = _mm_min_ps(x, *(v4sf*)_ps_exp_hi);
x = _mm_max_ps(x, *(v4sf*)_ps_exp_lo);
x = _mm_max_ps( _mm_min_ps(x, *(v4sf*)_ps_exp_hi), *(v4sf*)_ps_exp_lo);
/* express exp(x) as exp(g + n*log(2)) */
fx = _mm_mul_ps(x, *(v4sf*)_ps_cephes_LOG2EF);
fx = _mm_add_ps(fx, *(v4sf*)_ps_0p5);
fx = _mm_add_ps(_mm_mul_ps(x, *(v4sf*)_ps_cephes_LOG2EF), *(v4sf*)_ps_0p5);
/* how to perform a floorf with SSE: just below */
#ifndef USE_SSE2
@@ -195,36 +193,25 @@ v4sf exp_ps(v4sf x) {
#endif
/* if greater, substract 1 */
v4sf mask = _mm_cmpgt_ps(tmp, fx);
mask = _mm_and_ps(mask, one);
fx = _mm_sub_ps(tmp, mask);
fx = _mm_sub_ps(tmp, _mm_and_ps(mask, one));
tmp = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1);
v4sf z = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2);
x = _mm_sub_ps(x, tmp);
x = _mm_sub_ps(x, z);
x = _mm_sub_ps(x, _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1));
x = _mm_sub_ps(x, _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2));
z = _mm_mul_ps(x,x);
v4sf y = *(v4sf*)_ps_cephes_exp_p0;
y = _mm_mul_ps(y, x);
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p1);
y = _mm_mul_ps(y, x);
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p2);
y = _mm_mul_ps(y, x);
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p3);
y = _mm_mul_ps(y, x);
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p4);
y = _mm_mul_ps(y, x);
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p5);
y = _mm_mul_ps(y, z);
y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p1);
y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p2);
y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p3);
y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p4);
y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p5);
y = _mm_mul_ps(y, _mm_mul_ps(x,x));
y = _mm_add_ps(y, x);
y = _mm_add_ps(y, one);
/* build 2^n */
#ifndef USE_SSE2
z = _mm_movehl_ps(z, fx);
mm0 = _mm_cvttps_pi32(fx);
mm1 = _mm_cvttps_pi32(z);
mm1 = _mm_cvttps_pi32(_mm_movehl_ps( _mm_mul_ps(x,x), fx));
mm0 = _mm_add_pi32(mm0, *(v2si*)_pi32_0x7f);
mm1 = _mm_add_pi32(mm1, *(v2si*)_pi32_0x7f);
mm0 = _mm_slli_pi32(mm0, 23);
@@ -234,10 +221,8 @@ v4sf exp_ps(v4sf x) {
COPY_MM_TO_XMM(mm0, mm1, pow2n);
_mm_empty();
#else
emm0 = _mm_cvttps_epi32(fx);
emm0 = _mm_add_epi32(emm0, *(v4si*)_pi32_0x7f);
emm0 = _mm_slli_epi32(emm0, 23);
v4sf pow2n = _mm_castsi128_ps(emm0);
emm0 = _mm_add_epi32(_mm_cvttps_epi32(fx), *(v4si*)_pi32_0x7f);
v4sf pow2n = _mm_castsi128_ps(_mm_slli_epi32(emm0, 23));
#endif
y = _mm_mul_ps(y, pow2n);
return y;
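The refactored hunks above only restructure the arithmetic; the underlying Cephes-style scheme is unchanged. A scalar sketch of the same scheme follows, using the standard Cephes expf constants (listed here as an assumption; they should match the _ps_cephes_* tables in sse_mathfun.cpp).
#include <cmath>
#include <cstdio>
// exp(x) expressed as 2^n * exp(g), n = round(x / ln 2), exp(g) via a degree-5 polynomial
static float exp_sketch(float x) {
    x = std::fmin(x, 88.3762626647949f);                      // clamp to the valid range
    x = std::fmax(x, -88.3762626647949f);
    float n = std::floor(x * 1.44269504088896341f + 0.5f);    // round(x * log2(e))
    float g = x - n * 0.693359375f - n * (-2.12194440e-4f);   // x - n*ln2, ln2 split for accuracy
    float y = 1.9875691500e-4f;                               // Horner evaluation of P(g)
    y = y * g + 1.3981999507e-3f;
    y = y * g + 8.3334519073e-3f;
    y = y * g + 4.1665795894e-2f;
    y = y * g + 1.6666665459e-1f;
    y = y * g + 5.0000001201e-1f;
    y = y * g * g + g + 1.0f;                                 // 1 + g + g^2 * P(g)
    return std::ldexp(y, (int)n);                             // multiply by 2^n
}
int main() {
    std::printf("%f vs %f\n", exp_sketch(1.5f), std::exp(1.5f));
}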

View File

@@ -1,63 +0,0 @@
#ifndef _SSE_MATH_FUN_
#define _SSE_MATH_FUN_
#include <xmmintrin.h>
/* yes I know, the top of this file is quite ugly */
#ifdef _MSC_VER /* visual c++ */
# define ALIGN16_BEG __declspec(align(16))
# define ALIGN16_END
#else /* gcc or icc */
# define ALIGN16_BEG
# define ALIGN16_END __attribute__((aligned(16)))
#endif
/* __m128 is ugly to write */
typedef __m128 v4sf; // vector of 4 float (sse1)
#ifdef USE_SSE2
# include <emmintrin.h>
typedef __m128i v4si; // vector of 4 int (sse2)
#else
typedef __m64 v2si; // vector of 2 int (mmx)
#endif
/* natural logarithm computed for 4 simultaneous float
return NaN for x <= 0
*/
v4sf log_ps(v4sf x);
#ifndef USE_SSE2
typedef union xmm_mm_union {
__m128 xmm;
__m64 mm[2];
} xmm_mm_union;
#define COPY_XMM_TO_MM(xmm_, mm0_, mm1_) { \
xmm_mm_union u; u.xmm = xmm_; \
mm0_ = u.mm[0]; \
mm1_ = u.mm[1]; \
}
#define COPY_MM_TO_XMM(mm0_, mm1_, xmm_) { \
xmm_mm_union u; u.mm[0]=mm0_; u.mm[1]=mm1_; xmm_ = u.xmm; \
}
#endif // USE_SSE2
v4sf exp_ps(v4sf x);
v4sf sin_ps(v4sf x);
/* almost the same as sin_ps */
v4sf cos_ps(v4sf x);
/* since sin_ps and cos_ps are almost identical, sincos_ps could replace both of them..
it is almost as fast, and gives you a free cosine with your sine */
void sincos_ps(v4sf x, v4sf *s, v4sf *c);
#endif