refactored and recurrent implementation

2016-01-22 13:21:34 +01:00
parent e61e616227
commit d424d87535
65 changed files with 12102 additions and 2361 deletions


@@ -0,0 +1,42 @@
#pragma once

#include <string>

namespace NeuralNetwork {
    namespace ActivationFunction {

        /**
         * @author Tomas Cernik (Tom.Cernik@gmail.com)
         * @brief Abstract base class for activation functions
         */
        class ActivationFunction {
        public:
            virtual ~ActivationFunction() {}

            /**
             * @brief Returns the derivative of the function
             * @param input is the input of the function
             * @param output is the output of the function
             */
            virtual float derivatedOutput(const float &input, const float &output) = 0;

            /**
             * @brief Returns the value of the function
             * @param x is the input of the function
             */
            virtual float operator()(const float &x) = 0;

            /**
             * @brief Returns a clone of the object
             */
            virtual ActivationFunction *clone() const = 0;

            /**
             * @brief Virtual function for storing the activation function
             * @returns JSON describing the function
             */
            virtual std::string stringify() const = 0;
        };
    }
}
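For reference, a minimal concrete implementation of this interface might look like the following sketch (a hypothetical Identity activation, not part of this commit; the include path is assumed from the commit's layout):

    #include <NeuralNetwork/ActivationFunction/ActivationFunction.h> // assumed path

    class Identity : public NeuralNetwork::ActivationFunction::ActivationFunction {
    public:
        virtual float derivatedOutput(const float &, const float &) override { return 1.0f; } // d/dx x = 1
        virtual float operator()(const float &x) override { return x; }
        virtual Identity *clone() const override { return new Identity(); } // covariant return
        virtual std::string stringify() const override {
            return "{ \"class\": \"Identity\" }";
        }
    };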


@@ -0,0 +1,26 @@
#pragma once

#include "./ActivationFunction.h"

namespace NeuralNetwork {
    namespace ActivationFunction {

        class Heaviside : public ActivationFunction {
        public:
            Heaviside(const float &lambdaP = 1.0f) : lambda(lambdaP) {}

            // The true Heaviside derivative is zero almost everywhere;
            // 1.0f is returned as a training-friendly surrogate.
            inline virtual float derivatedOutput(const float &, const float &) override { return 1.0f; }

            inline virtual float operator()(const float &x) override { return x > lambda ? 1.0f : 0.0f; }

            virtual ActivationFunction *clone() const override {
                return new Heaviside(lambda);
            }

            virtual std::string stringify() const override {
                return "{ \"class\": \"NeuralNetwork::ActivationFunction::Heaviside\", \"lambda\" : " + std::to_string(lambda) + "}";
            }

        protected:
            float lambda;
        };
    }
}
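A quick sketch of the threshold behaviour (include path assumed):

    #include <NeuralNetwork/ActivationFunction/Heaviside.h> // assumed path

    void heavisideDemo() {
        NeuralNetwork::ActivationFunction::Heaviside step(0.5f);
        float below = step(0.4f); // 0.0f (at or below the threshold lambda)
        float above = step(0.6f); // 1.0f (above the threshold)
    }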


@@ -0,0 +1,27 @@
#pragma once

#include <cmath>

#include "./ActivationFunction.h"

namespace NeuralNetwork {
    namespace ActivationFunction {

        class HyperbolicTangent : public ActivationFunction {
        public:
            HyperbolicTangent(const float &lam = 1) : lambda(lam) {}

            // d/dx tanh(lambda*x) = lambda * (1 - tanh^2(lambda*x)), expressed via the cached output
            inline virtual float derivatedOutput(const float &, const float &output) override { return lambda * (1 - output * output); }

            inline virtual float operator()(const float &x) override { return std::tanh(lambda * x); }

            virtual ActivationFunction *clone() const override {
                return new HyperbolicTangent(lambda);
            }

            virtual std::string stringify() const override {
                return "{ \"class\": \"NeuralNetwork::ActivationFunction::HyperbolicTangent\", \"lambda\" : " + std::to_string(lambda) + "}";
            }

        protected:
            float lambda;
        };
    }
}
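A finite-difference sanity check of derivatedOutput (illustrative sketch, include path assumed):

    #include <NeuralNetwork/ActivationFunction/HyperbolicTangent.h> // assumed path

    void tanhDerivativeCheck() {
        NeuralNetwork::ActivationFunction::HyperbolicTangent t(1.0f);
        float x = 0.3f, h = 1e-3f;
        float numeric = (t(x + h) - t(x - h)) / (2.0f * h); // central difference
        float analytic = t.derivatedOutput(x, t(x));        // lambda * (1 - tanh^2(lambda*x))
        // numeric and analytic agree up to O(h^2) error
    }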


@@ -0,0 +1,35 @@
#pragma once

#include <cmath>

#include "./StreamingActivationFunction.h"
#include "../../sse_mathfun.h"

namespace NeuralNetwork {
    namespace ActivationFunction {

        /**
         * @author Tomas Cernik (Tom.Cernik@gmail.com)
         * @brief Class for computing the sigmoid 1/(1 + exp(lambda*x));
         *        lambda defaults to a negative value, which gives an increasing sigmoid
         */
        class Sigmoid : public StreamingActivationFunction {
        public:
            Sigmoid(const float lambdaP = -0.5) : lambda(lambdaP) {}

            // Note: the analytic derivative of 1/(1 + exp(lambda*x)) is
            // -lambda*output*(1 - output); callers must account for this sign convention.
            inline virtual float derivatedOutput(const float &, const float &output) override { return lambda * output * (1.0f - output); }

            inline virtual float operator()(const float &x) override { return 1.0f / (1.0f + std::exp(lambda * x)); }

            inline virtual __m128 operator()(const __m128 &x) override {
                // exp_ps is extremely slow!
                return _mm_div_ps(_mm_set1_ps(1.0f), _mm_add_ps(exp_ps(_mm_mul_ps(_mm_set1_ps(lambda), x)), _mm_set1_ps(1.0f)));
            }

            virtual ActivationFunction *clone() const override {
                return new Sigmoid(lambda);
            }

            virtual std::string stringify() const override {
                return "{ \"class\": \"NeuralNetwork::ActivationFunction::Sigmoid\", \"lambda\" : " + std::to_string(lambda) + "}";
            }

        protected:
            float lambda;
        };
    }
}
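The __m128 overload evaluates four lanes per call; a brief usage sketch (include path assumed):

    #include <NeuralNetwork/ActivationFunction/Sigmoid.h> // assumed path

    void sigmoidDemo() {
        NeuralNetwork::ActivationFunction::Sigmoid sig;  // lambda = -0.5, i.e. 1/(1 + e^(-0.5x))
        __m128 in = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);  // lanes 0..3 hold 0, 1, 2, 3
        __m128 out = sig(in);                            // four sigmoid values at once
        float scalar = sig(1.0f);                        // equals lane 1 of out
    }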


@@ -0,0 +1,26 @@
#pragma once

#include <xmmintrin.h>

#include "./ActivationFunction.h"

namespace NeuralNetwork {
    namespace ActivationFunction {

        /**
         * @author Tomas Cernik (Tom.Cernik@gmail.com)
         * @brief Abstract class of activation function with SSE support
         */
        class StreamingActivationFunction : public ActivationFunction {
        public:
            virtual float derivatedOutput(const float &input, const float &output) = 0;

            // Restated so the scalar overload is not hidden by the __m128 overload below
            virtual float operator()(const float &x) = 0;

            /**
             * @brief Returns the values of four outputs at once
             * @param x packs four float inputs, one per SSE lane
             */
            virtual __m128 operator()(const __m128 &x) = 0;
        };
    }
}


@@ -0,0 +1,27 @@
#pragma once

#include <cmath>
#include <vector>
#include <string>

namespace NeuralNetwork {
    namespace BasisFunction {

        class BasisFunction {
        public:
            virtual ~BasisFunction() {}

            virtual float operator()(const std::vector<float> &weights, const std::vector<float> &input) = 0;

            /**
             * @brief Returns a clone of the object
             */
            virtual BasisFunction *clone() const = 0;

            /**
             * @brief Virtual function for storing the basis function
             * @returns JSON describing the function
             */
            virtual std::string stringify() const = 0;
        };
    }
}


@@ -0,0 +1,68 @@
#pragma once

#include <mmintrin.h>
#include <xmmintrin.h>
#include <emmintrin.h>
#include <pmmintrin.h>

#include "./StreamingBasisFunction.h"
#include "../../sse_mathfun.h"

namespace NeuralNetwork {
    namespace BasisFunction {

        class Linear : public StreamingBasisFunction {
        public:
            Linear() {}

            inline virtual float computeStreaming(const std::vector<float> &weights, const std::vector<float> &input) override {
                size_t inputSize = input.size();
                size_t alignedPrev = inputSize - inputSize % 4;
                const float *weightsData = weights.data();
                const float *inputData = input.data();

                vec4f partialSolution;
                partialSolution.sse = _mm_setzero_ps();

                // TODO: prefetch?
                // Note: _mm_load_ps assumes 16-byte aligned data, which typical
                // 64-bit allocators provide for std::vector storage; _mm_loadu_ps
                // would be the portable choice.
                for (size_t k = 0; k < alignedPrev; k += 4) {
                    partialSolution.sse = _mm_add_ps(partialSolution.sse, _mm_mul_ps(_mm_load_ps(weightsData + k), _mm_load_ps(inputData + k)));
                }

                // Scalar tail: _mm_load_ss zeroes the upper lanes, so the full-vector
                // add only contributes the single product in lane 0.
                for (size_t k = alignedPrev; k < inputSize; k++) {
                    partialSolution.sse = _mm_add_ps(partialSolution.sse, _mm_mul_ps(_mm_load_ss(weightsData + k), _mm_load_ss(inputData + k)));
                }

#ifdef USE_SSE2 // pre-SSE3 horizontal add
                partialSolution.sse = _mm_add_ps(_mm_movehl_ps(partialSolution.sse, partialSolution.sse), partialSolution.sse);
                partialSolution.sse = _mm_add_ss(partialSolution.sse, _mm_shuffle_ps(partialSolution.sse, partialSolution.sse, 1));
#else
                partialSolution.sse = _mm_hadd_ps(partialSolution.sse, partialSolution.sse);
                partialSolution.sse = _mm_hadd_ps(partialSolution.sse, partialSolution.sse);
#endif
                return partialSolution.f[0];
            }

            inline virtual float compute(const std::vector<float> &weights, const std::vector<float> &input) override {
                float tmp = 0;
                size_t inputSize = input.size();
                for (size_t k = 0; k < inputSize; k++) {
                    tmp += input[k] * weights[k];
                }
                return tmp;
            }

            virtual BasisFunction *clone() const override {
                return new Linear();
            }

            virtual std::string stringify() const override {
                return "{ \"class\": \"NeuralNetwork::BasisFunction::Linear\" }";
            }
        };
    }
}
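A brief usage sketch of the dot product (include path assumed):

    #include <NeuralNetwork/BasisFunction/Linear.h> // assumed path
    #include <vector>

    void linearDemo() {
        NeuralNetwork::BasisFunction::Linear dot;
        std::vector<float> w{0.5f, -1.0f, 2.0f};
        std::vector<float> in{1.0f, 1.0f, 1.0f};
        float s = dot(w, in); // 1.5f, dispatched to the SSE path (computeStreaming)
    }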


@@ -0,0 +1,25 @@
#pragma once

#include <cmath>

#include "./BasisFunction.h"

namespace NeuralNetwork {
    namespace BasisFunction {

        class Radial : public BasisFunction {
        public:
            Radial() {}

            // The original file declares no operator(), which would leave Radial
            // abstract and make clone() ill-formed; a Euclidean-distance body is
            // assumed here as the conventional radial basis.
            virtual float operator()(const std::vector<float> &weights, const std::vector<float> &input) override {
                float sum = 0;
                for (size_t k = 0; k < input.size(); k++) {
                    const float d = input[k] - weights[k];
                    sum += d * d;
                }
                return std::sqrt(sum);
            }

            virtual BasisFunction *clone() const override {
                return new Radial();
            }

            virtual std::string stringify() const override {
                return "{ \"class\": \"NeuralNetwork::BasisFunction::Radial\" }";
            }
        };
    }
}
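A quick check of the assumed distance form (include path assumed):

    #include <NeuralNetwork/BasisFunction/Radial.h> // assumed path
    #include <vector>

    void radialDemo() {
        NeuralNetwork::BasisFunction::Radial rbf;
        std::vector<float> centre{0.0f, 0.0f};
        std::vector<float> point{3.0f, 4.0f};
        float d = rbf(centre, point); // 5.0f, Euclidean distance from the centre (assumed form, see above)
    }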


@@ -0,0 +1,23 @@
#pragma once

#include <xmmintrin.h>

#include "./BasisFunction.h"

namespace NeuralNetwork {
    namespace BasisFunction {

        class StreamingBasisFunction : public BasisFunction {
        public:
            union vec4f {
                __m128 sse;
                float f[4];
            };

            // The call operator dispatches to the SSE implementation by default
            virtual float operator()(const std::vector<float> &weights, const std::vector<float> &input) override {
                return computeStreaming(weights, input);
            }

            virtual float computeStreaming(const std::vector<float> &weights, const std::vector<float> &input) = 0;
            virtual float compute(const std::vector<float> &weights, const std::vector<float> &input) = 0;
        };
    }
}


@@ -0,0 +1,33 @@
#pragma once

#include <cstddef>
#include <vector>

#include "Neuron.h"

namespace NeuralNetwork {

    /**
     * @author Tomas Cernik (Tom.Cernik@gmail.com)
     * @brief Abstract class for all layers of neurons
     */
    class Layer {
    public:
        virtual ~Layer() {}

        /**
         * @brief Virtual function for selecting a neuron
         * @param neuron is the position in the layer
         * @returns the specific neuron
         */
        virtual Neuron &operator[](const size_t &neuron) = 0;

        /**
         * @returns the size of the layer
         */
        virtual size_t size() const = 0;
    };
}


@@ -0,0 +1,55 @@
#pragma once

#include <cstddef>
#include <vector>
#include <ostream>
#include <sstream>

#include "Neuron.h"
#include "Stringifiable.h"

namespace NeuralNetwork {

    /**
     * @author Tomas Cernik (Tom.Cernik@gmail.com)
     * @brief Abstract model of a simple network
     */
    class Network : public Stringifiable {
    public:
        /**
         * @brief Constructor for Network
         */
        inline Network() {}

        /**
         * @brief Virtual destructor for Network
         */
        virtual ~Network() {}

        /**
         * @brief Virtual function implemented by all networks
         * @param input is the input of the network
         * @returns the output of the network
         */
        virtual std::vector<float> computeOutput(const std::vector<float> &input) = 0;

        /**
         * @brief Enables or disables threaded computation of the ANN
         * @param t is the number of threads; 0 or 1 disables threading
         */
        inline virtual void setThreads(const unsigned &t) final { threads = t; }

        using Stringifiable::stringify;

    protected:
        /**
         * @brief Number of threads used by the network
         */
        unsigned threads = 1;
    };
}


@@ -0,0 +1,71 @@
#pragma once

#include <string>

namespace NeuralNetwork {

    /**
     * @author Tomas Cernik (Tom.Cernik@gmail.com)
     * @brief Abstract class of a neuron. All neuron classes should derive from this one
     */
    class Neuron {
    public:
        /**
         * @brief Returns a unique id for the neuron
         */
        virtual unsigned long id() const = 0;

        /**
         * @brief Virtual destructor for Neuron
         */
        virtual ~Neuron() {}

        /**
         * @brief Virtual function for storing the neuron
         * @returns JSON describing the neuron and its state
         */
        virtual std::string stringify(const std::string &prefix = "") const = 0;

        /**
         * @brief Gets a weight
         * @param n is the input neuron
         */
        virtual float getWeight(const Neuron &n) const = 0;

        /**
         * @brief Sets a weight
         * @param n is the input neuron
         * @param w is the new weight for input neuron n
         */
        virtual void setWeight(const Neuron &n, const float &w) = 0;

        /**
         * @brief Returns the output of the neuron
         */
        virtual float output() const = 0;

        /**
         * @brief Returns the input (pre-activation value) of the neuron
         */
        virtual float value() const = 0;

        // /**
        //  * @brief Returns the value of the derivative of the activation function
        //  */
        // virtual float derivatedOutput() const = 0;

        /**
         * @brief Sets the bias of the neuron
         * @param bias is the new bias (initial value for the neuron)
         */
        virtual void setBias(const float &bias) = 0;

        /**
         * @brief Returns the bias of the neuron
         */
        virtual float getBias() const = 0;
    };
}


@@ -0,0 +1,88 @@
#pragma once

#include <vector>
#include <sstream>
#include <iomanip>
#include <limits>

#include "../Network.h"
#include "Neuron.h"

namespace NeuralNetwork {
    namespace Recurrent {

        /**
         * @author Tomas Cernik (Tom.Cernik@gmail.com)
         * @brief Recurrent model of an artificial neural network
         */
        class Network : public NeuralNetwork::Network {
        public:
            /**
             * @brief Constructor for Network
             * @param _inputSize is the number of inputs to the network
             * @param _outputSize is the size of the output of the network
             * @param hiddenUnits is the number of hidden units to be created
             */
            inline Network(size_t _inputSize, size_t _outputSize, size_t hiddenUnits = 0) : NeuralNetwork::Network(), inputSize(_inputSize), outputSize(_outputSize), neurons() {
                for (size_t i = 0; i < _inputSize + _outputSize; i++) {
                    addNeuron();
                }
                for (size_t i = 0; i < hiddenUnits; i++) {
                    addNeuron();
                }
            }

            // TODO: implement deserialization
            inline Network(const std::string &json) {
            }

            /**
             * @brief Virtual destructor for Network
             */
            virtual ~Network() {}

            /**
             * @brief Computes one iteration of the network
             * @param input is the input of the network
             * @returns the output of the network
             */
            inline virtual std::vector<float> computeOutput(const std::vector<float> &input) override {
                return computeOutput(input, 1);
            }

            /**
             * @brief Computes several iterations of the network
             * @param input is the input of the network
             * @param iterations is the number of iterations
             * @returns the output of the network
             */
            std::vector<float> computeOutput(const std::vector<float> &input, unsigned int iterations);

            std::vector<Neuron> &getNeurons() {
                return neurons;
            }

            using NeuralNetwork::Network::stringify;

            void stringify(std::ostream &out) const override;

            Neuron &addNeuron() {
                neurons.push_back(Recurrent::Neuron(neurons.size()));
                Neuron &newNeuron = neurons.back();
                // Give every neuron (including the new one) a zero-weight
                // connection from the newly added neuron.
                for (size_t i = 0; i < neurons.size(); i++) {
                    neurons[i].setWeight(newNeuron, 0.0);
                }
                return newNeuron;
            }

        protected:
            size_t inputSize = 0;
            size_t outputSize = 0;
            std::vector<Recurrent::Neuron> neurons;
        };
    }
}
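A minimal usage sketch (weights would normally be set by a training routine elsewhere in the commit; the indexing below assumes inputs occupy the first ids, followed by outputs, which this header does not spell out):

    #include <NeuralNetwork/Recurrent/Network.h> // assumed path
    #include <vector>

    void recurrentDemo() {
        NeuralNetwork::Recurrent::Network net(2, 1, 3); // 2 inputs, 1 output, 3 hidden units
        net.getNeurons()[2].setBias(0.1f);              // index 2 assumed to be the output neuron
        std::vector<float> out = net.computeOutput({1.0f, 0.0f}, 5); // let the state settle for 5 iterations
    }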


@@ -0,0 +1,123 @@
#pragma once

#include <vector>
#include <sstream>
#include <iomanip>
#include <limits>

#include "../Neuron.h"
#include <NeuralNetwork/ActivationFunction/Sigmoid.h>
#include <NeuralNetwork/BasisFunction/Linear.h>

namespace NeuralNetwork {
    namespace Recurrent {

        class Network;

        /**
         * @author Tomas Cernik (Tom.Cernik@gmail.com)
         * @brief Class of recurrent neuron.
         */
        class Neuron : public NeuralNetwork::Neuron {
        public:
            // lambda = -4.9 yields the steepened sigmoid 1/(1 + e^(-4.9x))
            Neuron(unsigned long _id = 0, const float &_bias = 0) : NeuralNetwork::Neuron(), basis(new BasisFunction::Linear),
                                                                    activation(new ActivationFunction::Sigmoid(-4.9)),
                                                                    id_(_id), bias(_bias), weights(_id + 1), _output(0), _value(0) {
            }

            Neuron(const Neuron &r) : NeuralNetwork::Neuron(), basis(r.basis->clone()), activation(r.activation->clone()), id_(r.id_),
                                      bias(r.bias), weights(r.weights), _output(r._output), _value(r._value) {
            }

            virtual ~Neuron() {
                delete basis;
                delete activation;
            }

            virtual std::string stringify(const std::string &prefix = "") const override;

            Recurrent::Neuron &operator=(const NeuralNetwork::Recurrent::Neuron &r) {
                if (this == &r) {
                    return *this; // self-assignment guard
                }
                id_ = r.id_;
                bias = r.bias;
                weights = r.weights;
                // Release the old strategies before cloning, otherwise they leak
                delete basis;
                delete activation;
                basis = r.basis->clone();
                activation = r.activation->clone();
                _output = r._output;
                _value = r._value;
                return *this;
            }

            virtual long unsigned int id() const override {
                return id_;
            }

            /**
             * @brief Gets a weight
             * @param n is the input neuron
             */
            virtual float getWeight(const NeuralNetwork::Neuron &n) const override {
                return weights[n.id()];
            }

            /**
             * @brief Sets a weight
             * @param n is the input neuron
             * @param w is the new weight for input neuron n
             */
            virtual void setWeight(const NeuralNetwork::Neuron &n, const float &w) override {
                if (weights.size() < n.id() + 1) {
                    weights.resize(n.id() + 1);
                }
                weights[n.id()] = w;
            }

            /**
             * @brief Returns the output of the neuron
             */
            virtual float output() const override {
                return _output;
            }

            /**
             * @brief Returns the input of the neuron
             */
            virtual float value() const override {
                return _value;
            }

            /**
             * @brief Sets the bias of the neuron
             * @param _bias is the new bias (initial value for the neuron)
             */
            virtual void setBias(const float &_bias) override {
                bias = _bias;
            }

            /**
             * @brief Returns the bias of the neuron
             */
            virtual float getBias() const override {
                return bias;
            }

            float operator()(const std::vector<float> &inputs) {
                // compute the pre-activation value
                _value = basis->operator()(weights, inputs) + bias;
                // compute the output
                _output = activation->operator()(_value);
                return _output;
            }

        protected:
            BasisFunction::BasisFunction *basis;
            ActivationFunction::ActivationFunction *activation;
            unsigned long id_;
            float bias;
            std::vector<float> weights;
            float _output;
            float _value;
        };
    }
}
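A sketch of wiring two neurons, including a self-recurrent loop; the input vector passed to operator() is indexed by neuron id, matching the weights layout (include path assumed):

    #include <NeuralNetwork/Recurrent/Neuron.h> // assumed path

    void neuronWiringDemo() {
        NeuralNetwork::Recurrent::Neuron a(0), b(1);
        b.setWeight(a, 0.8f);                  // connection a -> b
        b.setWeight(b, 0.25f);                 // self-recurrent loop b -> b
        float y = b({a.output(), b.output()}); // one activation step for b
    }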


@@ -0,0 +1,29 @@
#pragma once

#include <sstream>
#include <string>

namespace NeuralNetwork {

    class Stringifiable {
    public:
        virtual ~Stringifiable() {
        }

        /**
         * @brief Writes a textual (JSON) description of the object to the stream
         */
        virtual void stringify(std::ostream &out) const = 0;

        virtual std::string stringify() const final {
            std::ostringstream s;
            stringify(s);
            return s.str();
        }
    };

    inline std::ostream &operator<<(std::ostream &o, const Stringifiable &n) {
        n.stringify(o);
        return o;
    }
}
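Any Stringifiable can therefore be streamed directly; for example (this sketch relies on the out-of-line stringify(std::ostream&) that the commit defines for Recurrent::Network elsewhere):

    #include <iostream>

    void stringifyDemo(NeuralNetwork::Recurrent::Network &net) {
        std::cout << net << std::endl;      // invokes net.stringify(std::cout)
        std::string json = net.stringify(); // same text via the std::string overload
    }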

include/Tools/Array.h

@@ -0,0 +1,80 @@
#pragma once

namespace Array {

    /**
     * @author Tomas Cernik (Tom.Cernik@gmail.com)
     * @brief Array template for simple usage
     *
     * Note: copies are shallow (the buffer pointer is shared) and the
     * destructor does not release memory; ownership is manual via free().
     */
    template <typename T>
    class Array {
    public:
        Array(unsigned long size = 0) : arr(size == 0 ? nullptr : new T[size]), _size(size) {
        }

        Array(const Array &r) : arr(r.arr), _size(r._size) {
        }

        ~Array() {
        }

        inline Array &operator=(const Array &r) {
            arr = r.arr;
            _size = r._size;
            return *this; // the original omitted this return
        }

        inline void resize(unsigned long size) {
            T *tmp = new T[size];
            // copy only as many elements as the new buffer can hold
            unsigned long toCopy = _size < size ? _size : size;
            for (unsigned long i = 0; i < toCopy; i++) {
                tmp[i] = arr[i];
            }
            delete[] arr; // delete[] of nullptr is a no-op
            arr = tmp;
            _size = size; // the original left _size stale when growing a non-empty array
        }

        inline void free() {
            delete[] arr;
            arr = nullptr;
            _size = 0;
        }

        inline const T &operator[](unsigned long i) const {
            return arr[i];
        }

        inline T &operator[](unsigned long i) {
            return arr[i];
        }

        unsigned long size() const {
            return _size;
        }

    protected:
        T *arr;
        unsigned long _size;
    };

    template <typename T>
    class DynamicArray : public Array<T> {
    public:
        DynamicArray(unsigned long size = 0, float _scaleFactor = 1) : Array<T>(size), scaleFactor(_scaleFactor) {
        }

    protected:
        float scaleFactor;
    };
}
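A short usage sketch of the manual ownership model:

    #include <Tools/Array.h> // assumed path

    void arrayDemo() {
        Array::Array<float> a(4);
        a[0] = 1.0f;
        a.resize(8); // old contents are copied into the new buffer
        a.free();    // must be called explicitly; the destructor does not release memory
    }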

include/sse_mathfun.h

@@ -0,0 +1,63 @@
#ifndef _SSE_MATH_FUN_
#define _SSE_MATH_FUN_
#include <xmmintrin.h>
/* yes I know, the top of this file is quite ugly */
#ifdef _MSC_VER /* visual c++ */
# define ALIGN16_BEG __declspec(align(16))
# define ALIGN16_END
#else /* gcc or icc */
# define ALIGN16_BEG
# define ALIGN16_END __attribute__((aligned(16)))
#endif
/* __m128 is ugly to write */
typedef __m128 v4sf; // vector of 4 float (sse1)
#ifdef USE_SSE2
# include <emmintrin.h>
typedef __m128i v4si; // vector of 4 int (sse2)
#else
typedef __m64 v2si; // vector of 2 int (mmx)
#endif
/* natural logarithm computed for 4 simultaneous float
return NaN for x <= 0
*/
v4sf log_ps(v4sf x);
#ifndef USE_SSE2
typedef union xmm_mm_union {
    __m128 xmm;
    __m64 mm[2];
} xmm_mm_union;

#define COPY_XMM_TO_MM(xmm_, mm0_, mm1_) { \
    xmm_mm_union u; u.xmm = xmm_;          \
    mm0_ = u.mm[0];                        \
    mm1_ = u.mm[1];                        \
}

#define COPY_MM_TO_XMM(mm0_, mm1_, xmm_) { \
    xmm_mm_union u; u.mm[0] = mm0_; u.mm[1] = mm1_; xmm_ = u.xmm; \
}
#endif // USE_SSE2
v4sf exp_ps(v4sf x);
v4sf sin_ps(v4sf x);
/* almost the same as sin_ps */
v4sf cos_ps(v4sf x);
/* since sin_ps and cos_ps are almost identical, sincos_ps could replace both of them..
it is almost as fast, and gives you a free cosine with your sine */
void sincos_ps(v4sf x, v4sf *s, v4sf *c);
#endif
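A usage sketch for the vectorised exponential (requires linking against the sse_mathfun implementation that ships with this commit):

    #include "sse_mathfun.h"

    void expDemo() {
        v4sf x = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f); // lane 0 holds 0.0f, lane 3 holds 3.0f
        v4sf y = exp_ps(x);                          // lanes hold e^0, e^1, e^2, e^3
    }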