diff --git a/Doxyfile b/Doxyfile index 3935bac..fd5004f 100644 --- a/Doxyfile +++ b/Doxyfile @@ -437,7 +437,7 @@ WARN_LOGFILE = # directories like "/usr/src/myproject". Separate the files or directories # with spaces. -INPUT = "./src/" "mainpage.dox" +INPUT = "./include/" "mainpage.dox" # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp @@ -873,18 +873,6 @@ GENERATE_XML = NO XML_OUTPUT = xml -# The XML_SCHEMA tag can be used to specify an XML schema, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_SCHEMA = - -# The XML_DTD tag can be used to specify an XML DTD, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_DTD = - # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that diff --git a/Makefile b/Makefile index 7d287bf..9ec0607 100644 --- a/Makefile +++ b/Makefile @@ -7,46 +7,32 @@ all:|pre libs pre: @mkdir -p lib -libs: nn - test: all make -C tests -install: |all _install -_install: +install: all @mkdir -p build/lib @cp lib/*.so build/lib/ -nn: | nn_build lib/NeuralNetwork.a lib/NeuralNetwork.so +libs: | nn_build lib/NeuralNetwork.a lib/NeuralNetwork.so -lib/NeuralNetwork.so: ./src/NeuralNetwork/NeuralNetwork.so - cp ./src/NeuralNetwork/NeuralNetwork.so ./lib/ +lib/NeuralNetwork.so: ./src/NeuralNetwork.so + cp ./src/NeuralNetwork.so ./lib/ -lib/NeuralNetwork.a: ./src/NeuralNetwork/NeuralNetwork.a - cp ./src/NeuralNetwork/NeuralNetwork.a ./lib/ - cp ./src/NeuralNetwork/NeuralNetwork.nm ./lib/ +lib/NeuralNetwork.a: ./src/NeuralNetwork.a + cp ./src/NeuralNetwork.a ./lib/ + cp ./src/NeuralNetwork.nm ./lib/ nn_build: - @make -C src/NeuralNetwork - -genetics: | genetics_build lib/Genetics.a lib/Genetics.so - -lib/Genetics.so: ./src/Genetics/Genetics.so - cp ./src/Genetics/Genetics.so ./lib/ - -lib/Genetics.a: ./src/Genetics/Genetics.a - cp ./src/Genetics/Genetics.a ./lib/ - cp ./src/Genetics/Genetics.nm ./lib/ - -genetics_build: - @make -C src/Genetics + @make -C src/ documentation: doxygen clean: - @make -C src/NeuralNetwork clean + @make -C src clean @make -C tests clean #@rm -f ./*.so ./*.a ./*.nm @rm -f ./lib/*.so ./lib/*.a ./lib/*.nm + @echo "Cleaned....." 
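For orientation before the remaining hunks: with the public headers now living under include/ and the library shipped as lib/NeuralNetwork.{a,so}, a consumer of the reorganized tree would build against it roughly as below. This is a minimal sketch, not part of the patch; it assumes the Recurrent::Network API and the Stringifiable operator<< introduced by the later hunks, and a compile line such as `g++ -std=c++14 -Iinclude main.cpp lib/NeuralNetwork.so`.

#include <NeuralNetwork/Recurrent/Network.h>

#include <iostream>
#include <vector>

int main() {
	// Hypothetical smoke test: 2 inputs, 1 output, 3 hidden units.
	NeuralNetwork::Recurrent::Network net(2, 1, 3);

	// One iteration through the recurrent net; an overload takes an iteration count.
	std::vector<float> out = net.computeOutput({0.5f, -0.25f});
	std::cout << out[0] << std::endl;

	// Stringifiable's operator<< serializes the network state as JSON.
	std::cout << net << std::endl;
	return 0;
}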
diff --git a/Makefile.const b/Makefile.const
index bdb6785..ee25adb 100644
--- a/Makefile.const
+++ b/Makefile.const
@@ -4,9 +4,9 @@ CXXFLAGS+= -std=c++14
 #-fprefetch-loop-arrays
 CXXFLAGS+= -pg -fPIC
 CXXFLAGS+= -g
-CXXFLAGS+= -fPIC -pthread
+CXXFLAGS+= -fPIC -pthread
 #CXXFLAGS+= -DUSE_SSE2
-OPTIMALIZATION = -O3 -march=native -mtune=native
+OPTIMALIZATION = -O3 -march=native -mtune=native
 
 %.o : %.cpp %.h
 	$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -c $< -o $@
diff --git a/include/NeuralNetwork/ActivationFunction/ActivationFunction.h b/include/NeuralNetwork/ActivationFunction/ActivationFunction.h
new file mode 100644
index 0000000..07f6245
--- /dev/null
+++ b/include/NeuralNetwork/ActivationFunction/ActivationFunction.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#include <string>
+
+namespace NeuralNetwork {
+namespace ActivationFunction {
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Abstract class of activation function
+	 */
+	class ActivationFunction {
+		public:
+
+			virtual ~ActivationFunction() {}
+
+			/**
+			 * @brief Returns the derivative of the output
+			 * @param input is input of function
+			 * @param output is output of function
+			 */
+			virtual float derivatedOutput(const float &input,const float &output)=0;
+
+			/**
+			 * @brief Returns value of output
+			 * @param x is input of function
+			 */
+			virtual float operator()(const float &x)=0;
+
+			/**
+			 * @brief Function returns clone of object
+			 */
+			virtual ActivationFunction* clone() const = 0;
+
+			/**
+			 * @brief This is a virtual function for storing Activation function
+			 * @returns json describing function
+			 */
+			virtual std::string stringify() const =0;
+	};
+}
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/ActivationFunction/Heaviside.h b/include/NeuralNetwork/ActivationFunction/Heaviside.h
new file mode 100644
index 0000000..ed351dc
--- /dev/null
+++ b/include/NeuralNetwork/ActivationFunction/Heaviside.h
@@ -0,0 +1,26 @@
+#pragma once
+
+#include "./ActivationFunction.h"
+
+namespace NeuralNetwork {
+namespace ActivationFunction {
+
+	class Heaviside: public ActivationFunction {
+		public:
+			Heaviside(const float &lambdaP=1.0): lambda(lambdaP) {}
+			inline virtual float derivatedOutput(const float &,const float &) override { return 1.0; }
+			inline virtual float operator()(const float &x) override { return x>lambda ? 1.0f : 0.0f; };
+
+			virtual ActivationFunction* clone() const override {
+				return new Heaviside(lambda);
+			}
+
+			virtual std::string stringify() const override {
+				return "{ \"class\": \"NeuralNetwork::ActivationFunction::Heaviside\", \"lambda\" : "+std::to_string(lambda)+"}";
+			}
+
+		protected:
+			float lambda;
+	};
+}
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/ActivationFunction/HyperbolicTangent.h b/include/NeuralNetwork/ActivationFunction/HyperbolicTangent.h
new file mode 100644
index 0000000..4e25b31
--- /dev/null
+++ b/include/NeuralNetwork/ActivationFunction/HyperbolicTangent.h
@@ -0,0 +1,27 @@
+#pragma once
+
+#include "./ActivationFunction.h"
+
+#include <cmath>
+
+namespace NeuralNetwork {
+namespace ActivationFunction {
+
+	class HyperbolicTangent: public ActivationFunction {
+		public:
+			HyperbolicTangent(const float& lam=1):lambda(lam) {}
+			inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*(1-output*output); }
+			inline virtual float operator()(const float &x) override { return tanh(lambda*x); };
+			virtual ActivationFunction* clone() const override {
+				return new HyperbolicTangent(lambda);
+			}
+
+			virtual std::string stringify() const override {
+				return "{ \"class\": \"NeuralNetwork::ActivationFunction::HyperbolicTangent\", \"lambda\" : "+std::to_string(lambda)+"}";
+			}
+
+		protected:
+			float lambda;
+	};
+}
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/ActivationFunction/Sigmoid.h b/include/NeuralNetwork/ActivationFunction/Sigmoid.h
new file mode 100644
index 0000000..86b0c09
--- /dev/null
+++ b/include/NeuralNetwork/ActivationFunction/Sigmoid.h
@@ -0,0 +1,35 @@
+#pragma once
+
+#include <cmath>
+
+#include "./StreamingActivationFunction.h"
+#include "../../sse_mathfun.h"
+
+namespace NeuralNetwork {
+namespace ActivationFunction {
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Class for computing sigmoid
+	 */
+	class Sigmoid: public StreamingActivationFunction {
+		public:
+			Sigmoid(const float lambdaP = -0.5): lambda(lambdaP) {}
+			inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*output*(1.0f-output); }
+			inline virtual float operator()(const float &x) override { return 1.0f / (1.0f +exp(lambda*x) ); };
+			inline virtual __m128 operator()(const __m128 &x) override {
+				// exp_ps is extremely slow!
+				return _mm_div_ps(_mm_set1_ps(1.0),_mm_add_ps(exp_ps(_mm_mul_ps(_mm_set1_ps(lambda),x)),_mm_set1_ps(1.0)));
+			}
+			virtual ActivationFunction* clone() const override {
+				return new Sigmoid(lambda);
+			}
+
+			virtual std::string stringify() const override {
+				return "{ \"class\": \"NeuralNetwork::ActivationFunction::Sigmoid\", \"lambda\" : "+std::to_string(lambda)+"}";
+			}
+		protected:
+			float lambda;
+	};
+}
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/ActivationFunction/StreamingActivationFunction.h b/include/NeuralNetwork/ActivationFunction/StreamingActivationFunction.h
new file mode 100644
index 0000000..b9acb3b
--- /dev/null
+++ b/include/NeuralNetwork/ActivationFunction/StreamingActivationFunction.h
@@ -0,0 +1,26 @@
+#pragma once
+
+#include <xmmintrin.h>
+
+#include "./ActivationFunction.h"
+
+namespace NeuralNetwork {
+namespace ActivationFunction {
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Abstract class of activation function with support of SSE
+	 */
+	class StreamingActivationFunction : public ActivationFunction {
+		public:
+			virtual float derivatedOutput(const float &input,const float &output)=0;
+			virtual float operator()(const float &x)=0;
+
+			/**
+			 * @brief Returns value of four outputs
+			 * @param x is float[4]; a value can be stored in every array slot
+			 */
+			virtual __m128 operator()(const __m128 &x)=0;
+	};
+}
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/BasisFunction/BasisFunction.h b/include/NeuralNetwork/BasisFunction/BasisFunction.h
new file mode 100644
index 0000000..5cd1e7a
--- /dev/null
+++ b/include/NeuralNetwork/BasisFunction/BasisFunction.h
@@ -0,0 +1,27 @@
+#pragma once
+
+#include <vector>
+#include <string>
+
+#include
+
+namespace NeuralNetwork {
+namespace BasisFunction {
+	class BasisFunction {
+		public:
+			virtual ~BasisFunction() {}
+			virtual float operator()(const std::vector<float>& weights, const std::vector<float>& input)=0;
+
+			/**
+			 * @brief Function returns clone of object
+			 */
+			virtual BasisFunction* clone() const = 0;
+
+			/**
+			 * @brief This is a virtual function for storing Basis function
+			 * @returns json describing function
+			 */
+			virtual std::string stringify() const =0;
+	};
+}
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/BasisFunction/Linear.h b/include/NeuralNetwork/BasisFunction/Linear.h
new file mode 100644
index 0000000..6833938
--- /dev/null
+++ b/include/NeuralNetwork/BasisFunction/Linear.h
@@ -0,0 +1,68 @@
+#pragma once
+
+#include <vector>
+#include
+#include
+#include
+#include
+
+#include "./StreamingBasisFunction.h"
+
+#include "../../sse_mathfun.h"
+
+namespace NeuralNetwork {
+namespace BasisFunction {
+
+	class Linear: public StreamingBasisFunction {
+		public:
+			Linear() {}
+
+			inline virtual float computeStreaming(const std::vector<float>& weights, const std::vector<float>& input) override {
+				size_t inputSize=input.size();
+				size_t alignedPrev=inputSize-inputSize%4;
+
+				const float* weightsData=weights.data();
+				const float* inputData=input.data();
+				vec4f partialSolution;
+				partialSolution.sse =_mm_setzero_ps();
+
+				//TODO prefetch ??
+				for(register size_t k=0;k<alignedPrev;k+=4) {
+					partialSolution.sse=_mm_add_ps(partialSolution.sse,_mm_mul_ps(_mm_loadu_ps(weightsData+k),_mm_loadu_ps(inputData+k)));
+				}
+
+				float sum=partialSolution.f[0]+partialSolution.f[1]+partialSolution.f[2]+partialSolution.f[3];
+
+				for(size_t k=alignedPrev;k<inputSize;k++) {
+					sum+=weightsData[k]*inputData[k];
+				}
+
+				return sum;
+			}
+
+			inline virtual float compute(const std::vector<float>& weights, const std::vector<float>& input) override {
+				register float tmp = 0;
+				size_t inputSize=input.size();
+				for(size_t k=0;k<inputSize;k++) {
+					tmp+=weights[k]*input[k];
+				}
+				return tmp;
+			}
+
+			virtual BasisFunction* clone() const override {
+				return new Linear();
+			}
+
+			virtual std::string stringify() const override {
+				return "{ \"class\": \"NeuralNetwork::BasisFunction::Linear\"}";
+			}
+	};
+}
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/BasisFunction/StreamingBasisFunction.h b/include/NeuralNetwork/BasisFunction/StreamingBasisFunction.h
new file mode 100644
--- /dev/null
+++ b/include/NeuralNetwork/BasisFunction/StreamingBasisFunction.h
@@ -0,0 +1,23 @@
+#pragma once
+
+#include <xmmintrin.h>
+
+#include "./BasisFunction.h"
+
+namespace NeuralNetwork {
+namespace BasisFunction {
+	class StreamingBasisFunction : public BasisFunction {
+		public:
+			union vec4f{
+				__m128 sse;
+				float f[4];
+			};
+
+			virtual float operator()(const std::vector<float>& weights, const std::vector<float>& input) override {
+				return computeStreaming(weights,input);
+			}
+			virtual float computeStreaming(const std::vector<float>& weights, const std::vector<float>& input) =0;
+			virtual float compute(const std::vector<float>& weights, const std::vector<float>& input) =0;
+	};
+}
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/Layer.h b/include/NeuralNetwork/Layer.h
new file mode 100644
index 0000000..65b7241
--- /dev/null
+++ b/include/NeuralNetwork/Layer.h
@@ -0,0 +1,33 @@
+#pragma once
+
+#include
+#include
+
+#include "Neuron.h"
+
+namespace NeuralNetwork
+{
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Abstract class for all Layers of neurons
+	 */
+	class Layer
+	{
+		public:
+
+			virtual ~Layer() {};
+
+			/**
+			 * @brief This is a virtual function for selecting neuron
+			 * @param neuron is position in layer
+			 * @returns Specific neuron
+			 */
+
+			virtual Neuron& operator[](const size_t& neuron)=0;
+			/**
+			 * @returns Size of layer
+			 */
+			virtual size_t size() const=0;
+	};
+
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/Network.h b/include/NeuralNetwork/Network.h
new file mode 100644
index 0000000..c5d144f
--- /dev/null
+++ b/include/NeuralNetwork/Network.h
@@ -0,0 +1,55 @@
+#pragma once
+
+#include <vector>
+#include
+
+#include "Neuron.h"
+
+#include "Stringifiable.h"
+
+#include
+#include
+
+namespace NeuralNetwork
+{
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Abstract model of simple Network
+	 */
+	class Network : public Stringifiable
+	{
+		public:
+			/**
+			 * @brief Constructor for Network
+			 */
+			inline Network() {};
+
+			/**
+			 * @brief Virtual destructor for Network
+			 */
+			virtual ~Network() {};
+
+			/**
+			 * @brief This is a virtual function for all networks
+			 * @param input is input of network
+			 * @returns output of network
+			 */
+			virtual std::vector<float> computeOutput(const std::vector<float>& input)=0;
+
+			/**
+			 * @param t is number of threads; if set to 0 or 1 then threading is disabled
+			 * @brief Enables or disables threaded computing of ANN
+			 */
+
+			inline virtual void setThreads(const unsigned& t) final {threads=t;}
+
+			using Stringifiable::stringify;
+
+		protected:
+			/**
+			 * @brief Number of threads used by network
+			 */
+			unsigned threads=1;
+	};
+}
\ No newline at end of file
diff --git a/src/NeuralNetwork/Neuron.h b/include/NeuralNetwork/Neuron.h
similarity index 51%
rename from src/NeuralNetwork/Neuron.h
rename to include/NeuralNetwork/Neuron.h
index bccbf72..132a71e 100644
--- a/src/NeuralNetwork/Neuron.h
+++ b/include/NeuralNetwork/Neuron.h
@@ -1,7 +1,6 @@
-#ifndef _S_NN_NEURON_H_
-#define _S_NN_NEURON_H_
+#pragma once
 
-#include
+#include <string>
 
 namespace NeuralNetwork
 {
@@ -12,19 +11,35 @@ namespace NeuralNetwork
 	class Neuron
 	{
 		public:
+
+			/**
+			 * @brief returns unique id for neuron
+			 */
+			virtual unsigned long id() const =0;
+
 			/**
 			 * @brief virtual destructor for Neuron
 			 */
 			virtual ~Neuron() {};
 
-			virtual float getWeight(const int &w) const =0;
+			/**
+			 * @brief This is a virtual function for storing network
+			 * @returns json describing network and its state
+			 */
+			virtual std::string stringify(const std::string &prefix="") const =0;
+
+			/**
+			 * @brief Gets weight
+			 * @param n is neuron
+			 */
+			virtual float getWeight(const Neuron &n) const =0;
 
 			/**
 			 * @brief Sets weight
-			 * @param i is number of neuron
-			 * @param p is new weight for input neuron i
+			 * @param n is neuron
+			 * @param w is new weight for input neuron n
 			 */
-			virtual void setWeight(const int& i ,const float &p) =0;
+			virtual void setWeight(const Neuron& n ,const float &w) =0;
 
 			/**
 			 * @brief Returns output of neuron
@@ -34,16 +49,16 @@
 			/**
 			 * @brief Returns input of neuron
 			 */
-			virtual float input() const=0;
+			virtual float value() const=0;
 
 			/**
 			 * @brief Returns value for derivation of activation function
 			 */
-			virtual float derivatedOutput() const=0;
+//			virtual float derivatedOutput() const=0;
 
 			/**
 			 * @brief Function sets bias for neuron
-			 * @param biad is new bias (initial value for neuron)
+			 * @param bias is new bias (initial value for neuron)
 			 */
 			virtual void setBias(const float &bias)=0;
 
@@ -53,5 +68,4 @@
 			virtual float getBias() const=0;
 		protected:
 	};
-}
-#endif
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/Recurrent/Network.h b/include/NeuralNetwork/Recurrent/Network.h
new file mode 100644
index 0000000..8163c65
--- /dev/null
+++ b/include/NeuralNetwork/Recurrent/Network.h
@@ -0,0 +1,88 @@
+#pragma once
+
+#include "../Network.h"
+#include "Neuron.h"
+
+#include
+
+#include
+#include
+#include
+
+namespace NeuralNetwork {
+namespace Recurrent {
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Recurrent model of artificial neural network
+	 */
+	class Network: public NeuralNetwork::Network {
+		public:
+
+			/**
+			 * @brief Constructor for Network
+			 * @param _inputSize is number of inputs to network
+			 * @param _outputSize is size of output from network
+			 * @param hiddenUnits is number of hidden units to be created
+			 */
+			inline Network(size_t _inputSize, size_t _outputSize,size_t hiddenUnits=0):NeuralNetwork::Network(),inputSize(_inputSize),outputSize(_outputSize), neurons(0) {
+				for(size_t i=0;i<_inputSize+_outputSize;i++) {
+					addNeuron();
+				}
+
+				for(size_t i=0;i<hiddenUnits;i++) {
+					addNeuron();
+				}
+			}
+
+			/**
+			 * @brief This is a function to compute output of network
+			 * @param input is input of network
+			 * @returns output of network
+			 */
+			virtual std::vector<float> computeOutput(const std::vector<float>& input) override {
+				return computeOutput(input,1);
+			}
+
+			/**
+			 * @brief This is a function to compute iterations of network
+			 * @param input is input of network
+			 * @param iterations is number of iterations
+			 * @returns output of network
+			 */
+			std::vector<float> computeOutput(const std::vector<float>& input, unsigned int iterations);
+
+			std::vector<Neuron>& getNeurons () {
+				return neurons;
+			}
+
+			using NeuralNetwork::Network::stringify;
+
+			void stringify(std::ostream& out) const override;
+
+			Neuron& addNeuron() {
+				neurons.push_back(Recurrent::Neuron(neurons.size()));
+				Neuron &newNeuron=neurons.back();
+				for(size_t i=0;i<neurons.size();i++) {
+					newNeuron.setWeight(neurons[i],0.0f);
+				}
+				return newNeuron;
+			}
+
+		protected:
+			size_t inputSize;
+			size_t outputSize;
+
+			std::vector<Neuron> neurons;
+	};
+}
+}
\ No newline at end of file
diff --git a/include/NeuralNetwork/Recurrent/Neuron.h b/include/NeuralNetwork/Recurrent/Neuron.h
new file mode 100644
index 0000000..5572b4c
--- /dev/null
+++ b/include/NeuralNetwork/Recurrent/Neuron.h
@@ -0,0 +1,123 @@
+#pragma once
+
+#include "../Neuron.h"
+#include
+#include
+#include
+
+
+#include
+#include
+#include
+
+namespace NeuralNetwork {
+namespace Recurrent {
+
+	class Network;
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Class of recurrent neuron.
+	 */
+	class Neuron : public NeuralNetwork::Neuron
+	{
+		public:
+			Neuron(unsigned long _id=0,const float& _bias = 0): NeuralNetwork::Neuron(), basis(new BasisFunction::Linear),
+																activation(new ActivationFunction::Sigmoid(-4.9)),
+																id_(_id),bias(_bias),weights(_id+1),_output(0),_value(0) {
+			}
+
+			Neuron(const Neuron &r): NeuralNetwork::Neuron(), basis(r.basis->clone()), activation(r.activation->clone()),id_(r.id_),
+									bias(r.bias), weights(r.weights), _output(r._output), _value(r._value) {
+			}
+			virtual ~Neuron() {
+				delete basis;
+				delete activation;
+			};
+
+			virtual std::string stringify(const std::string &prefix="") const override;
+
+			Recurrent::Neuron& operator=(const NeuralNetwork::Recurrent::Neuron&r) {
+				if(this==&r) {
+					return *this;
+				}
+				id_=r.id_;
+				bias=r.bias;
+				weights=r.weights;
+				delete basis;
+				delete activation;
+				basis=r.basis->clone();
+				activation=r.activation->clone();
+				return *this;
+			}
+
+			virtual long unsigned int id() const override {
+				return id_;
+			};
+
+			/**
+			 * @brief Gets weight
+			 * @param n is neuron
+			 */
+			virtual float getWeight(const NeuralNetwork::Neuron &n) const override {
+				return weights[n.id()];
+			}
+
+			/**
+			 * @brief Sets weight
+			 * @param n is neuron
+			 * @param w is new weight for input neuron n
+			 */
+			virtual void setWeight(const NeuralNetwork::Neuron& n ,const float &w) override {
+				if(weights.size()<=n.id()) {
+					weights.resize(n.id()+1);
+				}
+				weights[n.id()]=w;
+			}
+
+			/**
+			 * @brief Returns output of neuron
+			 */
+			virtual float output() const override {
+				return _output;
+			}
+
+			/**
+			 * @brief Returns value of neuron
+			 */
+			virtual float value() const override {
+				return _value;
+			}
+
+			/**
+			 * @brief Function sets bias for neuron
+			 * @param bias is new bias (initial value for neuron)
+			 */
+			virtual void setBias(const float &bias) override {
+				this->bias=bias;
+			}
+
+			/**
+			 * @brief Function to get bias of neuron
+			 */
+			virtual float getBias() const override {
+				return bias;
+			}
+
+			float computeOutput(const std::vector<float>& inputs) {
+				//compute value
+				_value=basis->operator()(weights,inputs)+bias;
+
+				//compute output
+				_output=activation->operator()(_value);
+
+				return _output;
+			}
+
+		protected:
+			BasisFunction::BasisFunction *basis;
+			ActivationFunction::ActivationFunction *activation;
+
+			unsigned long id_;
+			float bias;
+			std::vector<float> weights;
+			float _output;
+			float _value;
+	};
+}
+}
diff --git a/include/NeuralNetwork/Stringifiable.h b/include/NeuralNetwork/Stringifiable.h
new file mode 100644
index 0000000..a165420
--- /dev/null
+++ b/include/NeuralNetwork/Stringifiable.h
@@ -0,0 +1,29 @@
+#pragma once
+
+#include <sstream>
+
+namespace NeuralNetwork {
+	class Stringifiable {
+		public:
+
+			virtual ~Stringifiable() {
+			}
+
+			/**
+			 * @brief This is a virtual function for storing class state
+			 */
+			virtual void stringify(std::ostream& out) const =0;
+
+			virtual std::string stringify() final {
+				std::ostringstream s;
+				stringify(s);
+				return s.str();
+			}
+
+	};
+
+	static std::ostream& operator<<(std::ostream& o, const Stringifiable& n) {
+		n.stringify(o);
+		return o;
+	}
+}
\ No newline at end of file
diff --git a/include/Tools/Array.h b/include/Tools/Array.h
new file mode 100644
index 0000000..bb7d4b3
--- /dev/null
+++ b/include/Tools/Array.h
@@ -0,0 +1,80 @@
+#pragma once
+
+namespace Array {
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Template of array for simple usage
+	 */
+
+	template <typename T>
+	class Array {
+		public:
+
+			Array(unsigned long size=0):arr(size==0? nullptr: new T[size]),_size(size) {
+
+			}
+
+			Array (const Array& r):arr(r.arr),_size(r._size) {
+			}
+
+			~Array() {
+			}
+
+			inline Array& operator=(const Array& r) {
+				arr=r.arr;
+				_size=r._size;
+				return *this;
+			}
+
+			inline void resize(unsigned long size) {
+				if(arr==nullptr) {
+					arr=new T[size];
+					_size=size;
+				}else {
+					T* tmp=new T[size];
+					for(unsigned long i=0;i<_size && i<size;i++) {
+						tmp[i]=arr[i];
+					}
+					delete[] arr;
+					arr=tmp;
+					_size=size;
+				}
+			}
+
+			inline void free() {
+				delete[] arr;
+				arr=nullptr;
+				_size=0;
+			}
+
+			inline const T& operator[](unsigned long i) const {
+				return arr[i];
+			}
+
+			inline T& operator[](unsigned long i) {
+				return arr[i];
+			}
+
+			unsigned long size() const {
+				return _size;
+			}
+
+		protected:
+			T* arr;
+			unsigned long _size;
+		private:
+	};
+
+	template <typename T>
+	class DynamicArray: public Array<T> {
+		public:
+			DynamicArray(unsigned long size=0,float _scaleFactor=1):Array<T>(size),scaleFactor(_scaleFactor) {
+
+			}
+
+
+
+		protected:
+			float scaleFactor;
+		private:
+	};
+}
\ No newline at end of file
diff --git a/src/sse_mathfun.h b/include/sse_mathfun.h
similarity index 100%
rename from src/sse_mathfun.h
rename to include/sse_mathfun.h
diff --git a/mainpage.dox b/mainpage.dox
index ae175f0..42f715f 100644
--- a/mainpage.dox
+++ b/mainpage.dox
@@ -6,6 +6,4 @@
 
 @author Tomas Cernik (Tom.Cernik@gmail.com)
 
-TODO
-
 */
diff --git a/src/Makefile b/src/Makefile
index 7d287bf..9ec0607 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -1,8 +1,32 @@
-OBJFILES= sse_mathfun.o
-
 include ../Makefile.const
 
-all: $(OBJFILES)
+OBJFILES= ./sse_mathfun.o ./NeuralNetwork/Recurrent/Network.o ./NeuralNetwork/Recurrent/Neuron.o
+
+#LayerNetwork.o\
+#	Learning/Learning.o Learning/BackPropagation.o ../sse_mathfun.o
+
+LINKFILES=
+
+LIBNAME=NeuralNetwork
+
+all: lib
+
+spec:=../include/
+
+
+%.o : %.cpp $(patsubst ./%.o,../include/%.h,$<)
+	$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -I../include -c $< -o $@
+
+lib: $(LIBNAME).so $(LIBNAME).a
+
+$(LIBNAME).so: $(OBJFILES)
+	$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so
+
+$(LIBNAME).a: $(OBJFILES)
+	rm -f $(LIBNAME).a # create new library
+	ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
+	ranlib $(LIBNAME).a
+	nm --demangle $(LIBNAME).a > $(LIBNAME).nm
 
 clean:
-	@rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o
+	@rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o
\ No newline at end of file
diff --git a/src/NeuralNetwork/ActivationFunction/ActivationFunction.h b/src/NeuralNetwork/ActivationFunction/ActivationFunction.h
deleted file mode 100644
index f5b249c..0000000
--- a/src/NeuralNetwork/ActivationFunction/ActivationFunction.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __TRAN_FUN_H_
-#define __TRAN_FUN_H_
-
-#include
-
-namespace NeuralNetwork
-{
-namespace ActivationFunction
-{
-	class ActivationFunction
-	{
-		public:
-			virtual ~ActivationFunction() {}
-			virtual float derivatedOutput(const float &input,const float &output)=0;
-			virtual float operator()(const float &x)=0;
-	};
-}
-}
-#endif
\ No newline at end of file
diff --git a/src/NeuralNetwork/ActivationFunction/Heaviside.h b/src/NeuralNetwork/ActivationFunction/Heaviside.h
deleted file mode 100644
index 5a8c815..0000000
--- a/src/NeuralNetwork/ActivationFunction/Heaviside.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __TRAN_HEAVISIDE_H_
-#define __TRAN_HEAVISIDE_H_
-
-#include "./ActivationFunction.h"
-
-namespace NeuralNetwork
-{
-namespace ActivationFunction
-{
-	class Heaviside: public ActivationFunction
-	{
-		public:
-			Sigmoid(const float &lambdaP): lambda(lambdaP) {}
-			inline virtual float derivatedOutput(const float &input,const
float &output) override { return 1.0; } - inline virtual float operator()(const float &x) override { return x>k ? 1.0f : 0.0f; }; - protected: - float lambda; - - }; -} -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/ActivationFunction/HyperbolicTangent.h b/src/NeuralNetwork/ActivationFunction/HyperbolicTangent.h deleted file mode 100644 index 8b9ed27..0000000 --- a/src/NeuralNetwork/ActivationFunction/HyperbolicTangent.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef __TRAN_HYPTAN_H_ -#define __TRAN_HYPTAN_H_ - -#include "./ActivationFunction.h" - -namespace NeuralNetwork -{ -namespace ActivationFunction -{ - class HyperbolicTangent: public ActivationFunction - { - public: - HyperbolicTangent(const float& lam=1):lambda(lam) {} - inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*(1-output*output); } - inline virtual float operator()(const float &x) override { return tanh(lambda*x); }; - protected: - float lambda; - }; -} -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/ActivationFunction/Sigmoid.h b/src/NeuralNetwork/ActivationFunction/Sigmoid.h deleted file mode 100644 index a64a3d1..0000000 --- a/src/NeuralNetwork/ActivationFunction/Sigmoid.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef __TRAN_SIGMOID_H_ -#define __TRAN_SIGMOID_H_ - -#include "./StreamingActivationFunction.h" - -namespace NeuralNetwork -{ -namespace ActivationFunction -{ - class Sigmoid: public StreamingActivationFunction - { - public: - Sigmoid(const float lambdaP = 0.8): lambda(lambdaP) {} - inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*output*(1.0f-output); } - inline virtual float operator()(const float &x) override { return 1.0f / (1.0f +exp(-lambda*x) ); }; - inline virtual __m128 operator()(__m128 x) override { - x=_mm_mul_ps(temporaryConstLambda,x); //-lambda*sol[k] - x=exp_ps(x); //exp(x) - x= _mm_add_ps(x,temporaryConst1); //1+exp() - x= _mm_div_ps(temporaryConst1,x);//1/.... - return x; - } - protected: - float lambda; - __m128 temporaryConst1=_mm_set1_ps(1.0); - __m128 temporaryConstLambda=_mm_set1_ps(-lambda); - }; -} -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/ActivationFunction/StreamingActivationFunction.h b/src/NeuralNetwork/ActivationFunction/StreamingActivationFunction.h deleted file mode 100644 index c9179ac..0000000 --- a/src/NeuralNetwork/ActivationFunction/StreamingActivationFunction.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __STREAMINGTRAN_FUN_H_ -#define __STREAMINGTRAN_FUN_H_ - -#include - -#include "../../sse_mathfun.h" - -#include "./ActivationFunction.h" - -namespace NeuralNetwork -{ -namespace ActivationFunction -{ - class StreamingActivationFunction : public ActivationFunction - { - public: - virtual float derivatedOutput(const float &input,const float &output)=0; - virtual float operator()(const float &x)=0; - virtual __m128 operator()(__m128)=0; // it must be overriden to be used! 
- }; -} -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/BasisFunction/BasisFunction.h b/src/NeuralNetwork/BasisFunction/BasisFunction.h deleted file mode 100644 index bfe669e..0000000 --- a/src/NeuralNetwork/BasisFunction/BasisFunction.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _BASIS_FUN_H_ -#define _BASIS_FUN_H_ - -#include - -namespace NeuralNetwork -{ -namespace BasisFunction -{ - class BasisFunction - { - public: - virtual ~BasisFunction() {} - virtual float operator()(const size_t &inputSize, const float* weights, const float* input)=0; - }; -} -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/BasisFunction/FeedForward.h b/src/NeuralNetwork/BasisFunction/FeedForward.h deleted file mode 100644 index 06ddb8e..0000000 --- a/src/NeuralNetwork/BasisFunction/FeedForward.h +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef __BASIS_FEEDFORWARD_H_ -#define __BASIS_FEEDFORWARD_H_ - -#include "./StreamingBasisFunction.h" - -#include -#include -#include -#include -#include - -namespace NeuralNetwork -{ -namespace BasisFunction -{ - class FeedForward: public StreamingBasisFunction - { - public: - FeedForward() {} - - inline virtual __m128 operator()(const size_t& inputSize, const float* weights, const float* input, const size_t& alignedPrev) - { - __m128 partialSolution= _mm_setzero_ps(); - __m128 w=_mm_setzero_ps(); - __m128 sols; - for(register size_t k=alignedPrev;k - -#include "../../sse_mathfun.h" - -#include "./BasisFunction.h" - -namespace NeuralNetwork -{ -namespace BasisFunction -{ - class StreamingBasisFunction : public BasisFunction - { - public: - virtual float operator()(const size_t &inputSize, const float* weights, const float* input) = 0; - virtual __m128 operator()(const size_t& inputSize, const float* weights, const float* input, const size_t& alignedPrev) =0; - }; -} -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/FeedForward.h b/src/NeuralNetwork/FeedForward.h deleted file mode 100644 index e66a100..0000000 --- a/src/NeuralNetwork/FeedForward.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _S_NN_FF_H_ -#define _S_NN_FF_H_ - -#include "LayerNetwork.h" - -namespace NeuralNetwork -{ - class FeedForward : public LayerNetwork - { - public: - FeedForward(std::initializer_list s, double lam=NeuralNetwork::lambda, - LayerNetworkInitializer weightInit= - [](const size_t&, const size_t &, const size_t &)->float{ return 1.0-((float)(rand()%2001))/1000.0;} - ) : LayerNetwork(s,lam,weightInit) {} - - }; -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/LayerNetwork.cpp b/src/NeuralNetwork/LayerNetwork.cpp deleted file mode 100644 index e3b04a9..0000000 --- a/src/NeuralNetwork/LayerNetwork.cpp +++ /dev/null @@ -1,201 +0,0 @@ -#include "LayerNetwork.h" - -using namespace NeuralNetwork; - -LayerNetworkLayer::~LayerNetworkLayer() -{ - if(neurons!=nullptr) - { - for(size_t i=0;i=layerSize) - throw std::out_of_range("Not so many neurons in layers."); - - return *neurons[neuron]; - -} - -LayerNetwork::LayerNetwork(std::initializer_list s, double lam, LayerNetworkInitializer weightInit): Network(),layers(s.size()) -{ - transfer = new ActivationFunction::ActivationFunction*[s.size()]; - weights= new float**[s.size()]; - layerSizes= new size_t[s.size()]; - outputs= new float*[s.size()]; - inputs= new float*[s.size()]; - register int i=0; - register int prev_size=1; - for(int layeSize:s) // TODO rename - { - transfer[i]= new ActivationFunction::Sigmoid(lam); - layeSize+=1; - if(i==0) - { - prev_size=layeSize; - } - 
layerSizes[i]=layeSize; - weights[i]= new float*[layeSize]; - outputs[i]= new float[layeSize]; - inputs[i]= new float[layeSize]; - - outputs[i][0]=1.0; - for (int j=1;j(transfer[layer]); - BasisFunction::StreamingBasisFunction *bFunc=dynamic_cast(basisFunction); - - size_t alignedPrev=prevSize>16?(prevSize-(prevSize%16)):0; - __m128 partialSolution; - - if(prevSize >=4 && function !=nullptr && bFunc != nullptr) - { - for( size_t j=begin;joperator()(prevSize,weights[layer][j],sol,alignedPrev); - _mm_store_ss(inputs[layer]+j,partialSolution); - partialSolution=function->operator()(partialSolution); - _mm_store_ss(newSolution+j,partialSolution); - } - }else - { - for( size_t j=begin;j=4) - { - partialSolution=bFunc->operator()(prevSize,weights[layer][j],sol,alignedPrev); - _mm_store_ss(inputs[layer]+j,partialSolution); - newSolution[j]=transfer[layer]->operator()(inputs[layer][j]); - }else - { - const float tmp=basisFunction->operator()(prevSize,weights[layer][j],sol); - inputs[layer][j]=tmp; - newSolution[j]=transfer[layer]->operator()(tmp); - } - } - } -} - -std::vector LayerNetwork::solve(const std::vector& p) -{ - register float* sol=outputs[0]; - - if(p.size()+1 != layerSizes[0]) - { - throw std::out_of_range("Wrong number of inputs"); - } - - sol[0]=1; - for(size_t i=0;i 1 && (layerSizes[i] > 700 || prevSize > 700)) // 700 is an guess about actual size, when creating thread has some speedup - { - std::vector th; - size_t s=1; - register size_t step =layerSizes[i]/threads; - for(size_t t=1;tvoid{ - solvePart(newSolution,from,to,prevSize,sol,i); - },s,s+step)); - s+=step; - } - solvePart(newSolution,s,layerSizes[i],prevSize,sol,i); - for (auto& thr : th) - thr.join(); - }else - { - solvePart(newSolution,1,layerSizes[i],prevSize,sol,i); - } - prevSize=layerSizes[i]; - sol=newSolution; - } - std::vector ret; - for(size_t i=1;i=layers) - throw std::out_of_range("Not so many layers in network."); - - return *ffLayers[l]; -} diff --git a/src/NeuralNetwork/LayerNetwork.h b/src/NeuralNetwork/LayerNetwork.h deleted file mode 100644 index f9ee113..0000000 --- a/src/NeuralNetwork/LayerNetwork.h +++ /dev/null @@ -1,140 +0,0 @@ -#ifndef _NN_LN_H_ -#define _NN_LN_H_ - -#include "Network.h" - -#include "ActivationFunction/Sigmoid.h" - -#include "BasisFunction/FeedForward.h" - -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include - -#include "../sse_mathfun.h" - -namespace NeuralNetwork -{ - class LayerNetworkNeuron : public Neuron - { - public: - inline LayerNetworkNeuron(float *w, float &outputF, float &i,float lam,ActivationFunction::ActivationFunction &fun):function(fun),weights(w),out(outputF),inputs(i),lambda(lam) { } - - LayerNetworkNeuron() = delete; - LayerNetworkNeuron(const LayerNetworkNeuron&) = delete; - LayerNetworkNeuron& operator=(const LayerNetworkNeuron&) = delete; - - inline virtual float getWeight(const int& i ) const override { return weights[i+1]; } - inline virtual void setWeight(const int& i,const float &p) override { weights[i+1]=p; } - - inline virtual float output() const override { return out; } - inline virtual float input() const override { return inputs; } - inline virtual float derivatedOutput() const override { return function.derivatedOutput(inputs,out); } - - inline virtual float getBias() const override { return weights[0]; } - inline virtual void setBias(const float & bias) override { weights[0]=bias; } - protected: - ActivationFunction::ActivationFunction &function; - float *weights; - float &out; - float 
&inputs; - float lambda; - private: - }; - - class LayerNetworkLayer: public Layer - { - public: - inline LayerNetworkLayer(size_t s, float **w,float *out,float *in,float lam,ActivationFunction::ActivationFunction &fun): function(fun), layerSize(s),weights(w),outputs(out),inputs(in),lambda(lam) {} - ~LayerNetworkLayer(); - - LayerNetworkLayer(const LayerNetworkLayer &) = delete; - LayerNetworkLayer& operator=(const LayerNetworkLayer &) = delete; - - virtual LayerNetworkNeuron& operator[](const size_t& neuron) override; - inline virtual size_t size() const override {return layerSize-1;}; - protected: - ActivationFunction::ActivationFunction &function; - LayerNetworkNeuron **neurons=nullptr; - size_t layerSize; - float **weights; - float *outputs; - float *inputs; - float lambda; - }; - - /** - * @brief typedef for LayerNetwork network initializating function - */ - typedef std::function LayerNetworkInitializer; - - /** - * @author Tomas Cernik (Tom.Cernik@gmail.com) - * @brief Class representing LayerNetwork network - * @see ACyclicNetwork - * - * @b Usage: - * @code - * Shin::NeuralNetwork::LayerNetwork net({1,5,2}); - * net.setThreads(2); // it alows network to use 2 threads if it needs to. - * Shin::Solution sol = net.solve(Shin::Problem(0.1)) // and finaly, solve Problem - * @endcode - */ - class LayerNetwork:public Network - { - public: - /** - * @brief Constructor for LayerNetwork - * @param s is initiaizer for layers (it's sizes) - * @param lam is parametr for TransferFunction - * @param weightInit is weight initializer function - */ - LayerNetwork(std::initializer_list s, double lam=NeuralNetwork::lambda, - LayerNetworkInitializer weightInit= - [](const size_t&, const size_t &, const size_t &)->float{ return 1.0-((float)(rand()%2001))/1000.0;} - ); - virtual ~LayerNetwork(); - - /** - * @brief we don't want to allow network to be copied - */ - LayerNetwork(const LayerNetwork &f) = delete; //TODO - /** - * @brief we don't want to allow network to be assigned - */ - LayerNetwork operator=(const LayerNetwork &f)=delete; - - /** - * @brief computes output Solution from input Problem - */ - - virtual size_t size() const { return layers; }; - - virtual std::vector solve(const std::vector& input) override; - - virtual LayerNetworkLayer& operator[](const size_t& l) override; - protected: - void solvePart(float *newSolution, size_t begin, size_t end,size_t prevSize, float* sol,size_t layer); - private: - LayerNetworkLayer **ffLayers=nullptr; - float ***weights=nullptr; - float **outputs=nullptr; - float **inputs=nullptr; - ActivationFunction::ActivationFunction **transfer=nullptr; - BasisFunction::BasisFunction *basisFunction = new BasisFunction::FeedForward(); - size_t *layerSizes=nullptr; - size_t layers;/**< Number of layers */ - }; - -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/Learning/BackPropagation b/src/NeuralNetwork/Learning/BackPropagation deleted file mode 120000 index e2bb82e..0000000 --- a/src/NeuralNetwork/Learning/BackPropagation +++ /dev/null @@ -1 +0,0 @@ -./BackPropagation.h \ No newline at end of file diff --git a/src/NeuralNetwork/Learning/BackPropagation.cpp b/src/NeuralNetwork/Learning/BackPropagation.cpp deleted file mode 100644 index 238bb24..0000000 --- a/src/NeuralNetwork/Learning/BackPropagation.cpp +++ /dev/null @@ -1,109 +0,0 @@ -#include "./BackPropagation" - -NeuralNetwork::Learning::BackPropagation::~BackPropagation() -{ - if(deltas!=nullptr) - { - for(size_t i=0;i& expectation) -{ - - if(deltas==nullptr) - { - deltas=new 
float*[network.size()]; - for(size_t i=0;i0;i--) - { - if(allowThreads) - { - std::vector th; - size_t s=0; - //TODO THIS IS NOT WORKING!!! - #define THREADS 4 - int step =network[i].size()/THREADS; - for(int t=1;t<=THREADS;t++) - { - if(s>=network[i].size()) - break; - th.push_back(std::thread([&i,this](size_t from, size_t to)->void{ - for(size_t j=from;jnetwork[i+1].size();k++) - { - deltasWeight+=deltas[i+1][k]*this->network[i+1][k].getWeight(j); - } - //deltas[i][j]*=this->network[i]->operator[](j)->derivatedOutput(); // WHY THE HELL IS SEQ here?? - } - },s,t==THREADS?network[i].size():s+step));//{} - s+=step; - } - for (auto& thr : th) - thr.join(); - }else - { - for(size_t j=0;jnetwork[i+1].size();k++) - { - deltasWeight+=deltas[i+1][k]*this->network[i+1][k].getWeight(j); - } - deltas[i][j]=deltasWeight*this->network[i][j].derivatedOutput(); - } - } - } - - for(size_t i=1;i& p, const std::vector& solution) -{ - std::vector a=network.solve(p); - double error=calculateError(solution,a); - - std::vector s; - if(noise) - { - for(size_t i=0;i -#include -#include - -#include "../LayerNetwork.h" -#include "Learning.h" - -/* - * http://sydney.edu.au/engineering/it/~comp4302/ann4-3s.pdf - * http://www.cs.cmu.edu/afs/cs/academic/class/15883-f13/slides/backprop.pdf - * http://airccse.org/journal/jcsit/0211ijcsit08.pdf - * http://www.cedar.buffalo.edu/~srihari/CSE574/Chap5/Chap5.3-BackProp.pdf - * http://stackoverflow.com/questions/13095938/can-somebody-please-explain-the-backpropagation-algorithm-to-me - * http://ufldl.stanford.edu/wiki/index.php/Backpropagation_Algorithm - * - * http://www.cleveralgorithms.com/nature-inspired/neural/backpropagation.html - * - */ - -namespace NeuralNetwork -{ -namespace Learning -{ - class BackPropagation : public Learning - { - public: - BackPropagation(LayerNetwork &n): Learning(), network(n) {} - virtual ~BackPropagation(); - - BackPropagation(const NeuralNetwork::Learning::BackPropagation&) =delete; - BackPropagation operator=(const NeuralNetwork::Learning::BackPropagation&) =delete; - - float teach(const std::vector&p,const std::vector&solution); - virtual void propagate(const std::vector& expectation); - - protected: - LayerNetwork &network; - inline virtual float correction(const float& expected, const float& computed) { return expected - computed;}; - - float **deltas=nullptr; - }; -} -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/Learning/Learning.cpp b/src/NeuralNetwork/Learning/Learning.cpp deleted file mode 100644 index a2041c7..0000000 --- a/src/NeuralNetwork/Learning/Learning.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include "Learning.h" - -float NeuralNetwork::Learning::Learning::calculateError(const std::vector& expectation, const std::vector& solution) -{ - register float a=0; - for (size_t i=0;i,std::vector>> &set) -{ - double error=0; - for (register size_t i=0;i - -#include "../FeedForward.h" - -namespace NeuralNetwork -{ -namespace Learning -{ - const float LearningCoeficient=0.4; - class Learning - { - public: - Learning() {}; - inline virtual ~Learning() {}; - - inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; }; - - inline virtual void allowThreading() final {allowThreads=1;} - inline virtual void disableThreading() final {allowThreads=0;} - - inline virtual void allowNoise() final {noise=1;} - inline virtual void disableNoise() final {noise=0;} - inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; } - - float calculateError(const 
std::vector &expectation,const std::vector &solution); - virtual float teach(const std::vector &p,const std::vector &solution)=0; - virtual float teachSet(const std::vector,std::vector>> &set) final; - - protected: - float learningCoeficient=LearningCoeficient; - bool allowThreads=0; - bool noise=0; - unsigned noiseSize=500; - }; -} -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/Learning/OpticalBackPropagation b/src/NeuralNetwork/Learning/OpticalBackPropagation deleted file mode 120000 index 5e0ab41..0000000 --- a/src/NeuralNetwork/Learning/OpticalBackPropagation +++ /dev/null @@ -1 +0,0 @@ -./OpticalBackPropagation.h \ No newline at end of file diff --git a/src/NeuralNetwork/Learning/OpticalBackPropagation.h b/src/NeuralNetwork/Learning/OpticalBackPropagation.h deleted file mode 100644 index cb45404..0000000 --- a/src/NeuralNetwork/Learning/OpticalBackPropagation.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _OPT_BACK_PROPAGATION_H_ -#define _OPT_BACK_PROPAGATION_H_ - -#include "BackPropagation.h" - -/* - * http://proceedings.informingscience.org/InSITE2005/P106Otai.pdf - */ - -namespace NeuralNetwork -{ -namespace Learning -{ - class OpticalBackPropagation : public BackPropagation - { - public: - inline OpticalBackPropagation(LayerNetwork &n): BackPropagation(n) {} - protected: - virtual float correction(const float& expected, const float& computed) override - { - register float tmp=(expected-computed); - register float ret=1+exp(tmp*tmp); - return tmp < 0? -ret:ret; - }; - }; -} -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/Makefile b/src/NeuralNetwork/Makefile deleted file mode 100644 index 629ea7f..0000000 --- a/src/NeuralNetwork/Makefile +++ /dev/null @@ -1,28 +0,0 @@ -OBJFILES=\ - LayerNetwork.o\ - Learning/Learning.o Learning/BackPropagation.o ../sse_mathfun.o - -LINKFILES= - -LIBNAME=NeuralNetwork - -include ../../Makefile.const - -all: lib - -../sse_mathfun.o: ../sse_mathfun.cpp ../sse_mathfun.h - make -C ../ - -lib: $(LIBNAME).so $(LIBNAME).a - -$(LIBNAME).so: $(OBJFILES) - $(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so - -$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ./ActivationFunction/ActivationFunction.h ./ActivationFunction/Sigmoid.h - rm -f $(LIBNAME).a # create new library - ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES) - ranlib $(LIBNAME).a - nm --demangle $(LIBNAME).a > $(LIBNAME).nm - -clean: - @rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o diff --git a/src/NeuralNetwork/Network.h b/src/NeuralNetwork/Network.h deleted file mode 100644 index 0477314..0000000 --- a/src/NeuralNetwork/Network.h +++ /dev/null @@ -1,93 +0,0 @@ -#ifndef _S_NN_NN_H_ -#define _S_NN_NN_H_ - -#include -#include - -#include "Neuron.h" - -namespace NeuralNetwork -{ - /** - * @brief Default value for lambda - */ - const float lambda=0.8; - - /** - * @author Tomas Cernik (Tom.Cernik@gmail.com) - * @brief Abstract class for all Layers of neurons - */ - class Layer - { - public: - - virtual ~Layer() {}; - - /** - * @brief This is a virtual function for selecting neuron - * @param neuron is position in layer - * @returns Specific neuron - */ - - virtual Neuron& operator[](const size_t& neuron)=0; - /** - * @returns Size of layer - */ - virtual size_t size() const=0; - }; - - /** - * @author Tomas Cernik (Tom.Cernik@gmail.com) - * @brief Abstract model of simple Network - */ - class Network - { - public: - /** - * @brief Constructor for Network - * @param lam is parametr for many TransferFunctions - */ - inline Network() {}; - - /** - * @brief 
Virtual destructor for Network - */ - virtual ~Network() {}; - - /** - * @brief This is a virtual function for all networks - * @param p is a Problem to be solved - * @returns Solution of Network for Problem - */ - virtual std::vector solve(const std::vector& input)=0; - - /** - * @brief Getter of layer - * @param layer is position fo layer - * @returns Retruns specified layer - */ - virtual Layer& operator[](const size_t &layer)=0; - - /** - * @brief Returns parametr for TransferFunctions - * @returns lambda (parametr for TransferFunctions) - */ - inline float getLambda() const {return lambda;} - - /** - * @param t is number of threads, if set to 0 or 1 then threading is disabled - * @brief Enables or disables Threaded computing of ANN - */ - - inline virtual void setThreads(const unsigned&t) final {threads=t;} - - protected: - - /** - * @brief Number of threads used by network - */ - unsigned threads=1; - }; - -} -#endif \ No newline at end of file diff --git a/src/NeuralNetwork/Perceptron.h b/src/NeuralNetwork/Perceptron.h deleted file mode 100644 index 4ae56e9..0000000 --- a/src/NeuralNetwork/Perceptron.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _S_NN_PERCEP_H_ -#define _S_NN_PERCEP_H_ - -#include "./FeedForward" -#include "TransferFunction/Heaviside.h" - -namespace NeuralNetwork -{ - /** - * @author Tomas Cernik (Tom.Cernik@gmail.com) - * @brief Class reprezenting Perceptron - network with only 2 layer (input and output) with Heaviside transfer function - */ - class Perceptron:public FeedForward - { - public: - /** - * @brief Constructor for Perceptron network - * @param inputSize size of input Problem - * @param outputSize size of output Solution - */ - Perceptron(const size_t &inputSize, const size_t &outputSize):FeedForward({inputSize,outputSize}) - { - // < iterate throuht layers and set them to Heaviside Function - for(int i=0;i +std::vector NeuralNetwork::Recurrent::Network::computeOutput(const std::vector& input, unsigned int iterations) { + //TODO: check inputSize + size_t neuronSize=neurons.size(); + + std::vector outputs(neuronSize); + for(size_t i=0;i ret; + for(size_t i=0;i::digits10+1); + out << "{\n"; + out << "\t\"class\":\"NeuralNetwork::Recurrent::Network\",\n"; + out << "\t\"size\":" << neurons.size() << ",\n"; + out << "\t\"inputs\":" << inputSize << ",\n"; + out << "\t\"outputs\":" << outputSize << ",\n"; + + out << "\t\"neurons\":["; + + for(size_t i=0;i + +std::string NeuralNetwork::Recurrent::Neuron::stringify(const std::string &prefix) const { + std::ostringstream out; + out.precision(std::numeric_limits::digits10+1); + out <::digits10+1); + + out << prefix << "{\n"; + out << prefix << "\t\"class\": \"NeuralNetwork::Recurrent::Neuron\",\n"; + out << prefix << "\t\"id\": " << id() << ",\n"; + out << prefix << "\t\"bias\": " << getBias() << ",\n"; + out << prefix << "\t\"output\": " << output() << ",\n"; + out << prefix << "\t\"value\": " << value() << ",\n"; + out << prefix << "\t\"activationFunction\": " << activation->stringify() <<",\n"; + out << prefix << "\t\"basisFunction\": " << basis->stringify() <<",\n"; + out << prefix << "\t\"weights\": ["; + for(size_t j=0;j @@ -174,12 +174,10 @@ v4sf exp_ps(v4sf x) { #endif v4sf one = *(v4sf*)_ps_1; - x = _mm_min_ps(x, *(v4sf*)_ps_exp_hi); - x = _mm_max_ps(x, *(v4sf*)_ps_exp_lo); + x = _mm_max_ps( _mm_min_ps(x, *(v4sf*)_ps_exp_hi), *(v4sf*)_ps_exp_lo); /* express exp(x) as exp(g + n*log(2)) */ - fx = _mm_mul_ps(x, *(v4sf*)_ps_cephes_LOG2EF); - fx = _mm_add_ps(fx, *(v4sf*)_ps_0p5); + fx = _mm_add_ps(_mm_mul_ps(x, 
*(v4sf*)_ps_cephes_LOG2EF), *(v4sf*)_ps_0p5);
 
 	/* how to perform a floorf with SSE: just below */
 #ifndef USE_SSE2
@@ -195,36 +193,25 @@
 #endif
 	/* if greater, substract 1 */
 	v4sf mask = _mm_cmpgt_ps(tmp, fx);
-	mask = _mm_and_ps(mask, one);
-	fx = _mm_sub_ps(tmp, mask);
+	fx = _mm_sub_ps(tmp, _mm_and_ps(mask, one));
 
-	tmp = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1);
-	v4sf z = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2);
-	x = _mm_sub_ps(x, tmp);
-	x = _mm_sub_ps(x, z);
+	x = _mm_sub_ps(x, _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1));
+	x = _mm_sub_ps(x, _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2));
 
-	z = _mm_mul_ps(x,x);
-	v4sf y = *(v4sf*)_ps_cephes_exp_p0;
-	y = _mm_mul_ps(y, x);
-	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p1);
-	y = _mm_mul_ps(y, x);
-	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p2);
-	y = _mm_mul_ps(y, x);
-	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p3);
-	y = _mm_mul_ps(y, x);
-	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p4);
-	y = _mm_mul_ps(y, x);
-	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p5);
-	y = _mm_mul_ps(y, z);
+	v4sf y = _mm_add_ps(_mm_mul_ps(*(v4sf*)_ps_cephes_exp_p0, x), *(v4sf*)_ps_cephes_exp_p1);
+	y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p2);
+	y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p3);
+	y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p4);
+	y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p5);
+	y = _mm_mul_ps(y, _mm_mul_ps(x,x));
 	y = _mm_add_ps(y, x);
 	y = _mm_add_ps(y, one);
 
 	/* build 2^n */
 #ifndef USE_SSE2
-	z = _mm_movehl_ps(z, fx);
 	mm0 = _mm_cvttps_pi32(fx);
-	mm1 = _mm_cvttps_pi32(z);
+	mm1 = _mm_cvttps_pi32(_mm_movehl_ps( _mm_mul_ps(x,x), fx));
 	mm0 = _mm_add_pi32(mm0, *(v2si*)_pi32_0x7f);
 	mm1 = _mm_add_pi32(mm1, *(v2si*)_pi32_0x7f);
 	mm0 = _mm_slli_pi32(mm0, 23);
@@ -234,10 +221,8 @@
 	COPY_MM_TO_XMM(mm0, mm1, pow2n);
 	_mm_empty();
 #else
-	emm0 = _mm_cvttps_epi32(fx);
-	emm0 = _mm_add_epi32(emm0, *(v4si*)_pi32_0x7f);
-	emm0 = _mm_slli_epi32(emm0, 23);
-	v4sf pow2n = _mm_castsi128_ps(emm0);
+	emm0 = _mm_add_epi32(_mm_cvttps_epi32(fx), *(v4si*)_pi32_0x7f);
+	v4sf pow2n = _mm_castsi128_ps(_mm_slli_epi32(emm0, 23));
 #endif
 	y = _mm_mul_ps(y, pow2n);
 	return y;
diff --git a/tests/Makefile b/tests/Makefile
index fe09c49..82cd018 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -1,45 +1,27 @@
 include ../Makefile.const
 
-OPTIMALIZATION=
 LIB_DIR = ../lib
 
-#GEN_TESTS=g-01 g-02
-NN_TESTEABLE=\
-	nn-01 nn-02 nn-03 nn-bp-sppeed \
-	nn-bp-xor \
-	nn-obp-xor \
-	nn-rl-xor nn-rl-and nn-rl-xor2\
-	nn-reinforcement nn-04 \
-	nn-pong
+ALL_TESTS=activation basis recurrent
 
-NN_TESTS= $(NN_TESTEABLE) nn-pong
-
-ALL_TESTS=$(NN_TESTEABLE) $(GEN_TESTS)
-
-LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a
+#LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a
 #LIBS=-lGenetics.so -lNeuronNetwork
 
 CXXFLAGS += -I$(LIB_DIR)
 
-all:| lib $(ALL_TESTS);
-
-gen: $(GEN_TESTS)
-
+all:$(ALL_TESTS);
 
 test: all
 	@for i in $(ALL_TESTS); do echo -n ./$$i; echo -n " - "; ./$$i; echo ""; done
 
-g-%: g-%.cpp $(LIB_DIR)/Genetics.a
	$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -o $@ $< $ $(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a -lm
+../src/NeuralNetwork.so: lib
 
-nn-%: nn-%.cpp $(LIB_DIR)/NeuralNetwork.a
-	$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuralNetwork.a -lm
-
-nn-pong: ./nn-pong.cpp $(LIB_DIR)/NeuronNetwork.a
-	$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuronNetwork.a -lm -lalleg -lGL
+%: %.cpp ../src/NeuralNetwork.so | lib
+	$(CXX) $(CXXFLAGS) -I../include -o $@ $< -lm ../src/NeuralNetwork.so -msse4.2 -DHAVE_VECLIB
 
 lib:
-	make -C ../
+	@make -C ../
 
 clean:
-	@for i in $(ALL_TESTS);do rm -f $$i;done;
\ No newline at end of file
+	@for i in $(ALL_TESTS);do rm -f $$i;done;
+
diff --git a/tests/activation.cpp b/tests/activation.cpp
new file mode 100644
index 0000000..a0231bb
--- /dev/null
+++ b/tests/activation.cpp
@@ -0,0 +1,59 @@
+#include <cassert>
+#include <iostream>
+#include <xmmintrin.h>
+
+#include <NeuralNetwork/ActivationFunction/Heaviside.h>
+#include <NeuralNetwork/ActivationFunction/Sigmoid.h>
+#include <NeuralNetwork/ActivationFunction/HyperbolicTangent.h>
+
+union {
+	__m128 v;    // SSE 4 x float vector
+	float a[4];  // scalar array of 4 floats
+} U;
+
+int main() {
+	{
+		NeuralNetwork::ActivationFunction::Heaviside h(1.0);
+		assert(h(0.2) == 0);
+		assert(h(1.2) == 1);
+	}
+
+	{
+		NeuralNetwork::ActivationFunction::Heaviside h(0.7);
+		assert(h(0.2) == 0);
+		assert(h(0.8) == 1);
+	}
+
+	{
+		NeuralNetwork::ActivationFunction::Sigmoid s(-0.7);
+		assert(s(0.1) > 0.517483);
+		assert(s(0.1) < 0.51750);
+
+		assert(s(10) > 0.998989);
+		assert(s(10) < 0.999189);
+	}
+	{
+		NeuralNetwork::ActivationFunction::Sigmoid s(-5);
+		assert(s(0.1) > 0.622359);
+		assert(s(0.1) < 0.622559);
+
+		assert(s(0.7) > 0.970588);
+		assert(s(0.7) < 0.970788);
+	}
+	{
+		NeuralNetwork::ActivationFunction::Sigmoid s(-0.7);
+		U.a[0]=0.1;
+		U.a[1]=10;
+		U.v=s(U.v);
+
+		assert(U.a[0] > 0.517483);
+		assert(U.a[0] < 0.51750);
+
+		assert(U.a[1] > 0.998989);
+		assert(U.a[1] < 0.999189);
+	}
+
+	std::cout << "OK" << std::endl;
+
+	return 0;
+}
\ No newline at end of file
diff --git a/tests/basis.cpp b/tests/basis.cpp
new file mode 100644
index 0000000..e5ec8a2
--- /dev/null
+++ b/tests/basis.cpp
@@ -0,0 +1,66 @@
+#include <cassert>
+
+#include <NeuralNetwork/BasisFunction/Linear.h>
+#include <iostream>
+#include <chrono>
+
+int main() {
+	{
+		NeuralNetwork::BasisFunction::Linear l;
+		assert(39.0==l.compute({1,2,3,5},{1,2,3,5}));
+		assert(39.0==l.computeStreaming({1,2,3,5},{1,2,3,5}));
+	}
+	{
+		NeuralNetwork::BasisFunction::Linear l;
+		assert(88.0==l.computeStreaming({1,2,3,5,7},{1,2,3,5,7}));
+		assert(88.0==l.compute({1,2,3,5,7},{1,2,3,5,7}));
+	}
+	{
+		NeuralNetwork::BasisFunction::Linear l;
+		std::vector<float> w;
+		for(int in=0;in<100;in++) {
+			w.push_back(2);
+		}
+		assert(400.0==l.computeStreaming(w,w));
+		assert(400.0==l.compute(w,w));
+	}
+	{
+		NeuralNetwork::BasisFunction::Linear l;
+		std::vector<float> w;
+		for(int in=0;in<55;in++) {
+			w.push_back(2);
+		}
+		assert(220.0==l.computeStreaming(w,w));
+		assert(220.0==l.compute(w,w));
+	}
+/*
+	std::vector<float> w;
+	std::vector<float> i;
+	for(int in=0;in<100000;in++) {
+		w.push_back(2);
+		i.push_back(2);
+	}
+
+	NeuralNetwork::BasisFunction::Linear l;
+	{
+		auto start = std::chrono::high_resolution_clock::now();
+		for(int in=0;in<1000;in++) {
+			l.compute(w,i);
+		}
+		auto end = std::chrono::high_resolution_clock::now();
+		std::chrono::duration<double> diff = end-start;
+		std::cout << "C++ :" << diff.count() << " s\n";
+	}
+	{
+		auto start = std::chrono::high_resolution_clock::now();
+		for(int in=0;in<1000;in++) {
+			l.computeStreaming(w,i);
+		}
+		auto end = std::chrono::high_resolution_clock::now();
+		std::chrono::duration<double> diff = end-start;
+		std::cout << "SSE :" << diff.count() << " s\n";
+	}
+*/
+	std::cout <<"OK" << std::endl;
+
+}
\ No newline at end of file
diff --git a/tests/nn-01.cpp b/tests/nn-01.cpp
deleted file mode 100644
index 970be29..0000000
--- a/tests/nn-01.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-#include "../src/NeuralNetwork/FeedForward.h"
-#include "../src/NeuralNetwork/Learning/BackPropagation"
-
-#include
-#include
-
-//typedef Shin::NeuronNetwork::Problem X;
-
-class X: public Shin::Problem
-{
-	public:
-		X(const X& a) :Problem(a) {}
-		X(const std::vector &a):Problem() { for (bool s:a) data.push_back((float)s);}
-	protected:
-};
-
-int main(int argc,char**)
-{
-    srand(time(NULL));
-    std::vector<Shin::Solution> s;
-    std::vector<X> p;
-
-    //
-    s.push_back(Shin::Solution(std::vector<float>({1})));
-    p.push_back(X(std::vector<bool>({0})));
-
-    s.push_back(Shin::Solution(std::vector<float>({0})));
-    p.push_back(X(std::vector<bool>({1})));
-
-    NeuralNetwork::FeedForward q({1,5000,5000,15000,2});
-    if(argc > 1)
-    {
-        std::cerr << "THREADING\n";
-        q.setThreads(2);
-    }
-#include <chrono>
-    auto t1 = std::chrono::high_resolution_clock::now();
-    for(int i=0;i<1000;i++)
-    {
-        //b.teach(p[i%2],s[i%2]);
-        q.solve(p[i%2])[0];
-        //std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
-    }
-    auto t2 = std::chrono::high_resolution_clock::now();
-    std::cout << "Time: " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;
-
-}
\ No newline at end of file
diff --git a/tests/nn-02.cpp b/tests/nn-02.cpp
deleted file mode 100644
index e309286..0000000
--- a/tests/nn-02.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-
-#include "../src/NeuralNetwork/FeedForward"
-
-#include <iostream>
-
-class X: public Shin::Problem
-{
-    protected:
-        std::vector<float> representation() const
-        {
-            return std::vector<float>({1,1});
-        }
-};
-
-class X1: public Shin::Problem
-{
-    protected:
-        std::vector<float> representation() const
-        {
-            return std::vector<float>({1});
-        }
-};
-
-int main()
-{
-    NeuralNetwork::FeedForward n({2,4,2});
-    NeuralNetwork::FeedForward nq({2,4,2});
-    if(n[2].size() != 4)
-    {
-        std::cout << "1) Actual size:" << n[1].size();
-        return 1;
-    }
-
-    if(nq[1].size() != 4)
-    {
-        std::cout << "QUICK Actual size:" << nq[1].size();
-        return 1;
-    }
-
-    n[2][0].setPotential(25);
-    nq[2][0].setPotential(25);
-
-    std::cout << "Potential: " << n[2][0].getPotential() << "\n";
-    std::cout << "Potential: " << nq[2][0].getPotential() << "\n";
-
-    Shin::Solution s =n.solve(X());
-    Shin::Solution sq =nq.solve(X());
-
-    if(s.size()!=2)
-    {
-        std::cout << "1";
-        return 1;
-    }
-
-    for(int i=0;i<2;i++)
-    {
-        if(s[i]!=sq[i])
-        {
-            std::cout << " 4 - " << i << " expected "<
-#include <iostream>
-#include <vector>
-
-class X: public Shin::Problem
-{
-    public:
-        X(const X& a) :Problem(),q(a.q) {}
-        X(const std::vector<float> &a):q(a) {}
-        std::vector<float> representation() const
-        {
-            return q;
-        }
-    protected:
-        std::vector<float> q;
-};
-
-int main()
-{
-    std::vector<Shin::Solution> s;
-    std::vector<X> p;
-
-    //
-    s.push_back(Shin::Solution(std::vector<float>({0})));
-    p.push_back(X(std::vector<float>({1,0})));
-    s.push_back(Shin::Solution(std::vector<float>({0})));
-    p.push_back(X(std::vector<float>({0,1})));
-    s.push_back(Shin::Solution(std::vector<float>({0})));
-    p.push_back(X(std::vector<float>({0,0})));
-    s.push_back(Shin::Solution(std::vector<float>({1})));
-    p.push_back(X(std::vector<float>({1,1})));
-
-    Shin::NeuralNetwork::FeedForward q({2,4,1});
-    Shin::NeuralNetwork::Learning::BackPropagation b(q);
-    b.setLearningCoeficient(10);
-
-    for(int i=0;i<4;i++)
-    {
-        b.teach(p[i%4],s[i%4]);
-        std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <
-#include <iostream>
-class X: public Shin::Problem
-{
-    public: X(bool x,bool y):Problem() {data.push_back(x);data.push_back(y);}
-};
-
-int main()
-{
-    srand(time(NULL));
-    int lm=5;
-    Shin::NeuralNetwork::FeedForward net({2,lm,1});
-    bool x=1;
-    int prev_err=0;
-    int err=0;
-    int l;
-    int n;
-    int w;
-    int pot;
-    int wei;
-    int c=0;
-    std::cout << "\ntest 1 & 1 -" << net.solve(X(1,1))[0];
-    std::cout << "\ntest 1 & 0 -" << net.solve(X(1,0))[0];
-    std::cout << "\ntest 0 & 1 - " << net.solve(X(0,1))[0];
-    std::cout << "\ntest 0 & 0- " << net.solve(X(0,0))[0];
-    std::cout << "\n---------------------------------------";
-    do{
-        if(c%10000 ==1)
-        {
-            std::cout << "\nmixed";
-            srand(time(NULL));
-        }
-        err=0;
-        c++;
-        l=rand()%2+1;
-        n=rand()%lm;
-        w=rand()%2;
-        if(l==2)
-            n=0;
-        pot=net[l][n].getPotential();
-        net[l][n].setPotential(pot*(rand()%21+90)/100);
-        wei=net[l][n].getWeight(w);
-        net[l][n].setWeight(w,wei*(rand()%21+90)/100);
-
-        for(int i=0;i<100;i++)
-        {
-            bool x= rand()%2;
-            bool y=rand()%2;
-            Shin::Solution s =net.solve(X(x,y));
-            if(s[0]!= (x xor y))
-                err++;
-        }
-
-        if(err > prev_err)
-        {
-            net[l][n].setPotential(pot);
-            net[l][n].setWeight(w,wei);
-        };
-        prev_err=err;
-        if(err <1)
-            x=0;
-    }while(x);
-    std::cout << "\ntest 1 & 1 -" << net.solve(X(1,1))[0];
-    std::cout << "\ntest 1 & 0 -" << net.solve(X(1,0))[0];
-    std::cout << "\ntest 0 & 1 - " << net.solve(X(0,1))[0];
-    std::cout << "\ntest 0 & 0- " << net.solve(X(0,0))[0];
-    std::cout << "\nTotaly: " << c << "\n";
-}
\ No newline at end of file
diff --git a/tests/nn-05.cpp b/tests/nn-05.cpp
deleted file mode 100644
index d7a4ab1..0000000
--- a/tests/nn-05.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-
-#include "../src/NeuralNetwork/FeedForward"
-
-#include <iostream>
-#include <ctime>
-
-
-int main()
-{
-
-    srand(time(NULL));
-    NeuralNetwork::FeedForward ns({1,1});
-    ns[1][0].setWeight(-1,0);
-    ns[1][0].setWeight(0,1);
-
-    Shin::Solution ss =ns.solve(Shin::Problem({1}));
-
-    if(ss[0] < 0.689874481 || ss[0] > 0.69)
-    {
-        std::cout << "1) Wrong counter: shoul be 0.626961, is: " << ss[0];
-        return 1;
-    }
-
-    NeuralNetwork::FeedForward xorF({2,2,1},0.8);
-
-    xorF[1][0].setWeight(-1,-6.06);
-    xorF[1][0].setWeight(0,-11.62);
-    xorF[1][0].setWeight(1,10.99);
-
-    xorF[1][1].setWeight(-1,-7.19);
-    xorF[1][1].setWeight(0,12.88);
-    xorF[1][1].setWeight(1,-13-13);
-
-    xorF[2][0].setWeight(-1,-6.56);
-    xorF[2][0].setWeight(0,13.34);
-    xorF[2][0].setWeight(1,-7.19);
-
-    ss= xorF.solve(Shin::Problem({0,1}));
-
-    if(ss[0] > 1 || ss[0] < 0.98 )
-    {
-        std::cout << "2) wrong output "<< ss[0] << "\n";
-        return 1;
-    }
-
-    ss= xorF.solve(Shin::Problem({0,1}));
-
-    if(ss[0] > 1 || ss[0] < 0.98 )
-    {
-        std::cout << "3) wrong output "<< ss[0];
-        return 1;
-    }
-
-    ss= xorF.solve(Shin::Problem({0,0}));
-
-    if(ss[0] <0 || ss[0] > 0.06 )
-    {
-        std::cout << "4) wrong output "<< ss[0] ;
-        return 1;
-    }
-
-    ss= xorF.solve(Shin::Problem({1,1}));
-
-    if(ss[0] <0 || ss[0] > 0.06 )
-    {
-        std::cout << "5) wrong output "<< ss[0];
-        return 1;
-    }
-
-    return 0;
-}
\ No newline at end of file
diff --git a/tests/nn-bp-sppeed.cpp b/tests/nn-bp-sppeed.cpp
deleted file mode 100644
index 84b2de7..0000000
--- a/tests/nn-bp-sppeed.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-#include "../src/NeuralNetwork/FeedForward"
-#include "../src/NeuralNetwork/Learning/BackPropagation"
-
-#include <iostream>
-#include <ctime>
-
-class X: public Shin::Problem
-{
-    public:
-        X(const X& a) :Problem(a.data) {}
-        X(const std::vector<float> &a):Problem(a) {}
-};
-
-int main(int argc, char**)
-{
-    srand(time(NULL));
-    std::vector<Shin::Solution> s;
-    std::vector<X> p;
-
-    //
-    s.push_back(Shin::Solution(std::vector<float>({1})));
-    p.push_back(X(std::vector<float>({0})));
-
-    s.push_back(Shin::Solution(std::vector<float>({0})));
-    p.push_back(X(std::vector<float>({1})));
-
-    Shin::NeuralNetwork::FeedForward q({1,5000,5000,5000,1});
-    Shin::NeuralNetwork::Learning::BackPropagation b(q);
-
-    if(argc >1)
-    {
-        std::cerr << "Allowing threadnig\n";
-        b.allowThreading();
-    }
-    for(int i=0;i<2;i++)
-    {
-        b.teach(p[i%2],s[i%2]);
-        std::cerr << i%2 <<". FOR: [" << p[i%2][0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
-    }
-}
\ No newline at end of file
diff --git a/tests/nn-bp-xor.cpp b/tests/nn-bp-xor.cpp
deleted file mode 100644
index 479ca5b..0000000
--- a/tests/nn-bp-xor.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-#include "../src/NeuralNetwork/FeedForward"
-#include "../src/NeuralNetwork/Learning/BackPropagation"
-
-#include <iostream>
-#include <ctime>
-
-class X: public Shin::Problem
-{
-    public:
-        X(const X& a) :Problem(a) {}
-        X(const std::vector<float> &a):Problem() {data=a;}
-};
-
-int main()
-{
-    srand(time(NULL));
-
-    for (int test=0;test<2;test++)
-    {
-        Shin::NeuralNetwork::FeedForward q({2,3,1});
-        Shin::NeuralNetwork::Learning::BackPropagation b(q);
-
-        std::vector<std::pair<Shin::Problem,Shin::Solution>> set;
-        set.push_back(std::pair<Shin::Problem,Shin::Solution>(Shin::Problem({0,0}),Shin::Solution({0})));
-        set.push_back(std::pair<Shin::Problem,Shin::Solution>(Shin::Problem({1,0}),Shin::Solution({1})));
-        set.push_back(std::pair<Shin::Problem,Shin::Solution>(Shin::Problem({1,1}),Shin::Solution({0})));
-        set.push_back(std::pair<Shin::Problem,Shin::Solution>(Shin::Problem({0,1}),Shin::Solution({1})));
-        if(test)
-        {
-            std::cerr << "Testing with entropy\n";
-            b.allowNoise();
-        }else
-        {
-            std::cerr << "Testing without entropy\n";
-        }
-        b.setLearningCoeficient(20);//8);
-        for(int j=0;;j++)
-        {
-            double err=b.teachSet(set);
-            if(err <0.3)
-            {
-//                b.setLearningCoeficient(5);
-            }
-            if(err <0.1)
-            {
-//                b.setLearningCoeficient(0.2);
-            }
-            if(err <0.001)
-            {
-                std::cerr << j << "(" << err <<"):\n";
-                for(int i=0;i<4;i++)
-                {
-                    std::cerr << "\t" << i%4 <<". FOR: [" << set[i%4].first[0] << "," <
-#include <iostream>
-#include <ctime>
-
-class X: public Shin::Problem
-{
-    public:
-        X(const X& a) :Problem(a) {}
-        X(const std::vector<float> &a):Problem() {data=a;}
-};
-
-int main()
-{
-    srand(time(NULL));
-    for (int test=0;test<2;test++)
-    {
-        Shin::NeuralNetwork::FeedForward q({2,40,1});
-        Shin::NeuralNetwork::Learning::OpticalBackPropagation b(q);
-        b.setLearningCoeficient(0.1);
-
-        std::vector<std::pair<Shin::Problem,Shin::Solution>> set;
-        set.push_back(std::pair<Shin::Problem,Shin::Solution>(Shin::Problem({0,0}),Shin::Solution({0})));
-        set.push_back(std::pair<Shin::Problem,Shin::Solution>(Shin::Problem({1,0}),Shin::Solution({1})));
-        set.push_back(std::pair<Shin::Problem,Shin::Solution>(Shin::Problem({1,1}),Shin::Solution({0})));
-        set.push_back(std::pair<Shin::Problem,Shin::Solution>(Shin::Problem({0,1}),Shin::Solution({1})));
-        if(test)
-        {
-            std::cerr << "Testing with entropy\n";
-            b.allowNoise();
-        }else
-        {
-            std::cerr << "Testing without entropy\n";
-        }
-        for(int j=0;;j++)
-        {
-            double err=b.teachSet(set);
-            if(err <0.3)
-            {
-//                b.setLearningCoeficient(5);
-            }
-            if(err <0.1)
-            {
-//                b.setLearningCoeficient(0.2);
-            }
-            if(err <0.001)
-            {
-                std::cerr << j << "(" << err <<"):\n";
-                for(int i=0;i<4;i++)
-                {
-                    std::cerr << "\t" << i%4 <<". FOR: [" << set[i%4].first[0] << "," <
-#include <allegro.h>
-#include <iostream>
-#include "../src/NeuronNetwork/Learning/QLearning.h"
-#include <unistd.h>
-
-int learningGames=6000;
-
-int ball_x = 320;
-int ball_y = 240;
-
-int ball_tempX = 320;
-int ball_tempY = 240;
-
-int p1_x = 20;
-int p1_y = 210;
-
-int p1_tempX = 20;
-int p1_tempY = 210;
-
-int p2_x = 620;
-int p2_y = 210;
-
-int p2_tempX = 620;
-int p2_tempY = 210;
-
-int i=0;
-
-long game=0;
-int q=0;
-int speed=1;
-
-bool randomLearner=0;
-
-int dir; //This will keep track of the circles direction
-         //1= up and left, 2 = down and left, 3= up and right, 4 = down and right
-
-BITMAP *buffer; //This will be our temporary bitmap for double buffering
-
-class X: public Shin::NeuronNetwork::Problem
-{
-    public:
-        X(int p1,int ballX,int ballY,int p2)//, int ballY)
-        {
-            data.push_back((float)p1/480.0);
-            data.push_back((float)ballX/640.0);
-            data.push_back((float)ballY/480.0);
-        }
-};
-
-Shin::NeuronNetwork::Learning::QLearning l(3,15,3);
-
-std::vector<std::pair<X,int>> p1x;
-
-void propagateOKtoP1(double quality=10)
-{
-    l.learnDelayed(p1x,quality);
-    p1x.clear();
-}
-
-void moveBall(){
-
-    ball_tempX = ball_x;
-    ball_tempY = ball_y;
-
-    if (dir == 1 && ball_x > 5 && ball_y > 5){
-
-        if( ball_x == p1_x + 15 && ball_y >= p1_y && ball_y <= p1_y + 60){
-            dir = rand()% 2 + 3;
-            propagateOKtoP1(100);
-        }else{
-            --ball_x;
-            --ball_y;
-        }
-
-    } else if (dir == 2 && ball_x > 5 && ball_y < 475){
-
-        if( ball_x == p1_x + 15 && ball_y >= p1_y && ball_y <= p1_y + 60){
-            dir = rand()% 2 + 3;
-            propagateOKtoP1(100);
-        }else{
-            --ball_x;
-            ++ball_y;
-        }
-
-    } else if (dir == 3 && ball_x < 635 && ball_y > 5){
-
-        if( ball_x + 5 == p2_x && ball_y >= p2_y && ball_y <= p2_y + 60){
-            dir = rand()% 2 + 1;
-        }else{
-            ++ball_x;
-            --ball_y;
-        }
-
-    } else if (dir == 4 && ball_x < 635 && ball_y < 475){
-
-        if( ball_x + 5 == p2_x && ball_y >= p2_y && ball_y <= p2_y + 60){
-            dir = rand()% 2 + 1;
-        }else{
-            ++ball_x;
-            ++ball_y;
-        }
-
-    } else {
-
-        if (dir == 1 || dir == 3) ++dir;
-        else if (dir == 2 || dir == 4) --dir;
-
-    }
-
-}
-
-char p1Move(){
-
-    X p=X(p1_y,ball_x,ball_y,p2_y);
-
-    if(game < learningGames)
-    {
-        if(randomLearner)
-        {
-            int tmp=rand()%3;
-            if(tmp==2)
-            {
-                p1x.push_back(std::pair<X,int>(p,2));//,ball_tempX,ball_tempY));
-                return 1;
-            }else if(tmp==0)
-            {
-                p1x.push_back(std::pair<X,int>(p,0));//,ball_tempX,ball_tempY));
-                return -1;
-            }else
-            {
-                p1x.push_back(std::pair<X,int>(p,1));//,ball_tempX,ball_tempY));
-                return 0;
-            }
-        }else
-        {
-            if( p1_tempY > ball_y && p1_y > 0){
-                p1x.push_back(std::pair<X,int>(p,0));//,ball_tempX,ball_tempY));
-                return -1;
-            } else if( p1_tempY < ball_y && p1_y < 420){
-                p1x.push_back(std::pair<X,int>(p,2));//,ball_tempX,ball_tempY));
-                return 1;
-            }else
-            {
-                p1x.push_back(std::pair<X,int>(p,1));//,ball_tempX,ball_tempY));
-                return 0;
-            }
-        }
-    }
-    int j=l.getChoice(p);
-
-    p1x.push_back(std::pair<X,int>(p,j));//,ball_tempX,ball_tempY));
-
-    return j-1;
-}
-
-char p2Move(){
-    if(game >= learningGames)
-    {
-        if(key[KEY_UP])
-            return 1;
-        else if( key[KEY_DOWN])
-            return -1;
-        else
-            return 0;
-    }else
-    {
-        if(rand()%10==0)
-        {
-            return (rand()%3)-1;
-        }
-        if( p2_tempY > ball_y){
-            return -1;
-        } else if( p2_tempY < ball_y){
-            return 1;
-        }
-        return 0;
-    }
-}
-
-void startNew(){
-
-    clear_keybuf();
-    if(game==learningGames)
-        textout_ex( screen, font, "Player 1 learned! Push a button to start a game.", 160, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
-
-    if(game >= learningGames)
-        readkey();
-
-    clear_to_color( buffer, makecol( 0, 0, 0));
-    ball_x = 350;
-    ball_y = rand()%481;
-
-    p1_x = 20;
-    p1_y = 210;
-
-    p2_x = 620;
-    p2_y = 210;
-
-}
-
-
-void checkWin(){
-
-    int won=0;
-    if ( ball_x < p1_x){
-        won=1;
-        game++;
-        textout_ex( screen, font, "Player 2 Wins!", 320, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
-        propagateOKtoP1(-100);
-        startNew();
-
-    } else if ( ball_x > p2_x){
-        game++;
-        won=1;
-        textout_ex( screen, font, "Player 1 Wins!", 320, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
-        propagateOKtoP1(100);
-        startNew();
-    }
-
-
-}
-
-void setupGame(){
-
-    acquire_screen();
-    rectfill( buffer, p1_x, p1_y, p1_x + 10, p1_y + 60, makecol ( 0, 0, 255));
-    rectfill( buffer, p2_x, p2_y, p2_x + 10, p2_y + 60, makecol ( 0, 0, 255));
-    circlefill ( buffer, ball_x, ball_y, 5, makecol( 128, 255, 0));
-    draw_sprite( screen, buffer, 0, 0);
-    release_screen();
-    srand( time(NULL));
-    dir = rand() % 4 + 1;
-
-}
-
-
-int main(int argc, char**argv)
-{
-    allegro_init();
-    install_keyboard();
-    set_color_depth(16);
-    set_gfx_mode( GFX_AUTODETECT_WINDOWED, 640, 480, 0, 0);
-
-    l.setLearningCoeficient(0.01,0.01);
-    if(argc>=4 && argv[3][0]=='o')
-    {
-        std::cerr << "USING Optical Backpropagation\n";
-        l.opticalBackPropagation();
-    }
-    if(argc>=3)
-    {
-        std::cerr << "Setting learning coefficients to:" << atof(argv[1]) << "," << atof(argv[2]) << "\n";
-        l.setLearningCoeficient(atof(argv[1]),atof(argv[2]));
-    }
-    if(argc >=5)
-    {
-        std::cerr << "Setting learning games to:" << atof(argv[4]) << "\n";
-        learningGames=atof(argv[4]);
-    }
-    if(argc >=6 && argv[5][0]=='r')
-    {
-        std::cerr << "Setting random learning\n";
-        randomLearner=1;
-    }
-    buffer = create_bitmap( 640, 480);
-    setupGame();
-    speed=51;
-    int sleepTime=1000;
-    while(!key[KEY_ESC])
-    {
-        q++;
-        if(key[KEY_T])
-        {
-            std::cout << "ADDING next 500 learning games\n";
-            usleep(500000);
-            learningGames+=500;
-        }
-        if(game < learningGames)
-        {
-            if( key[KEY_UP] && speed < 200){
-                speed+=5;
-            }else if( key[KEY_DOWN] && speed >1 ){
-                speed-=5;
-            }
-            if(speed <= 0)
-            {
-                speed=1;
-            }
-        }else
-        {
-            speed=1;
-        }
-
-        register char p1dir=p1Move();
-        register char p2dir=p2Move();
-
-        p1_tempY = p1_y;
-        p2_tempY = p2_y;
-
-        if(p1dir < 0 && p1_y > 0){
-            --p1_y;
-        } else if( p1dir > 0 && p1_y < 420){
-            ++p1_y;
-        }
-        if(p2dir > 0 && p2_y > 0){
-            --p2_y;
-        } else if( p2dir < 0 && p2_y < 420){
-            ++p2_y;
-        }
-        moveBall();
-        if(key[KEY_PLUS_PAD] && sleepTime >=10)
-            sleepTime-=50;
-        else if(key[KEY_MINUS_PAD] && sleepTime <=15000)
-            sleepTime+=50;
-
-        if(i%speed==0)
-        {
-            acquire_screen();
-            rectfill( buffer, p1_tempX, p1_tempY, p1_tempX + 10, p1_tempY + 60, makecol ( 0, 0, 0));
-            rectfill( buffer, p1_x, p1_y, p1_x + 10, p1_y + 60, makecol ( 0, 0, 255));
-
-            rectfill( buffer, p2_tempX, p2_tempY, p2_tempX + 10, p2_tempY + 60, makecol ( 0, 0, 0));
-            rectfill( buffer, p2_x, p2_y, p2_x + 10, p2_y + 60, makecol ( 0, 0, 255));
-
-            circlefill ( buffer, ball_tempX, ball_tempY, 5, makecol( 0, 0, 0));
-            circlefill ( buffer, ball_x, ball_y, 5, makecol( 128, 255, 0));
-            draw_sprite( screen, buffer, 0, 0);
-            release_screen();
-            usleep(sleepTime);
-        }
-        checkWin();
-        i++;
-    }
-
-    return 0;
-
-}
-
-END_OF_MAIN()
diff --git a/tests/nn-reinforcement.cpp b/tests/nn-reinforcement.cpp
deleted file mode 100644
index 27ec3c9..0000000
--- a/tests/nn-reinforcement.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-#include "../src/NeuronNetwork/FeedForward"
-#include "../src/NeuronNetwork/Learning/Reinforcement.h" -#include "../src/NeuronNetwork/Solution.h" - -#include -#include - -class X: public Shin::NeuronNetwork::Problem -{ - public: - X(const X& a) :q(a.q) {} - X(const std::vector &a):q(a) {} - std::vector representation() const - { - return q; - } - protected: - std::vector q; -}; - -int main() -{ - srand(time(NULL)); - - std::vector p; - - p.push_back(X(std::vector({0,0}))); - - p.push_back(X(std::vector({1,1}))); - - Shin::NeuronNetwork::FeedForward q({2,6,2}); - Shin::NeuronNetwork::Learning::Reinforcement b(q); - b.getPropagator().setLearningCoeficient(1); - int i=0; - b.setQualityFunction( - [&i](const Shin::NeuronNetwork::Problem &,const Shin::NeuronNetwork::Solution &s)->float - { - if(i%2==0) - { - //ocekavame 1 - int e=(s[0]-0.80)*15.0;//+(abs(s[1])-0.5)*100.0; - return e; - }else - { - //ocekavame 0 - int e=(0.20-s[0])*15.0;//+(0.4-abs(s[1]))*100.0; - return e; - } - return 1.0; - }); - for(i=0;i < 500000000;i++) - { - if(i==75000) - { - std::cerr << "SSSSSS1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n"; - b.setCoef(1); - } - if(i==150000) - { - std::cerr << "SSSSSS1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n"; - b.setCoef(0.51); - } - if(i==300000) - { - std::cerr << "SSSSSS2XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n"; - b.setCoef(0.15); - } - b.learn(p[i%2]); - - if(i%100000==0) - srand(time(NULL)); - if(i%10000==0) - for(int j=0;j<2;j++) - { - std::cerr << j%4 <<". FOR: [" << p[j%4].representation()[0] << "," < -#include - -class X: public Shin::NeuronNetwork::Problem -{ - public: - X(const X& a) :Problem(a) {} - X(const std::vector &a):Problem() {data=a;} -}; - -int main() -{ - srand(time(NULL)); - - std::vector p; - - p.push_back(new X(std::vector({0,0}))); - - p.push_back(new X(std::vector({1,1}))); - - p.push_back(new X(std::vector({1,0}))); - p.push_back(new X(std::vector({0,1}))); - - Shin::NeuronNetwork::FeedForward q({2,1}); - Shin::NeuronNetwork::Learning::Reinforcement b(q); - int i=0; - double targetQuality=0.5; - b.setQualityFunction( - [](const Shin::NeuronNetwork::Problem &pr,const Shin::NeuronNetwork::Solution &s)->float - { - if(pr[0]==1 && pr[1]==1) - { - //ocekavame 1 - int e=(s[0]-0.80)*15.0;//+(abs(s[1])-0.5)*100.0; - return e; - }else - { - //ocekavame 0 - int e=(0.20-s[0])*15.0;//+(0.4-abs(s[1]))*100.0; - return e; - } - return 1.0; - }); - for(i=0;i < 500000000;i++) - { - double err=b.learnSet(p); - - if(i%100000==0) - srand(time(NULL)); - if(err > targetQuality||i%1000==0) - { - std::cerr << i << " ("<< err <<").\n"; - for(int j=0;j<4;j++) - { - std::cerr << j%4 <<". FOR: [" << p[j%4]->operator[](0) << "," <operator[](0) << "] res: " << q.solve(*p[j%4])[0] << "\n"; - } - } - if(err >targetQuality) - break; - } - -/* int i=0; - std::cerr << i%4 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n"; - - for(int i=0;i<2000;i++)sa - { - b.teach(p[i%2],s[i%2]); - std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n"; - } - b.debugOn(); - for(int i=0;i<2;i++) - { - b.teach(p[i%2],s[i%2]); - std::cerr << i%4 <<". 
FOR: [" << p[i%4].representation()[0] << "," < -#include - - -class X: public Shin::NeuronNetwork::Problem -{ - public: - X(const X& a) :Problem(a) {} - X(const std::vector &a):Problem() {data=a;} -}; - - -int main() -{ - srand(time(NULL)); - for (int test=0;test<3;test++) - { - Shin::NeuronNetwork::FeedForward q({2,4,1}); - Shin::NeuronNetwork::Learning::Reinforcement b(q); - //b.setPropagator(new Shin::NeuronNetwork::Learning::OpticalBackPropagation(q)); - b.getPropagator().setLearningCoeficient(0.4); - //b.getPropagator().allowEntropy(); - double targetQuality =2.9; - if(test==2) - { - targetQuality =1.62; - std::cerr << "Testing with OBP ...\n"; - - b.setPropagator(new Shin::NeuronNetwork::Learning::OpticalBackPropagation(q)); - b.getPropagator().setLearningCoeficient(0.5); - } - b.setQualityFunction( - [](const Shin::NeuronNetwork::Problem &p,const Shin::NeuronNetwork::Solution &s)->float - { - float expect=0.0; - if(p[0] && p[1]) - expect=0; - else if(p[0] && !p[1]) - expect=1; - else if(!p[0] && !p[1]) - expect=0; - else if(!p[0] && p[1]) - expect=1; - -// std::cerr << "expected: " << expect << " got " << s[0]; - - if(expect==0) - { - expect=0.3-abs(s[0]); - }else - { - expect=s[0]-0.7; - } - -// std::cerr << " returnning " << expect*5.0 << "\n"; - - return expect*19.0; - }); - - std::vector p; - - p.push_back(new X(std::vector({0,0}))); - p.push_back( new X(std::vector({1,0}))); - p.push_back( new X(std::vector({0,1}))); - p.push_back(new X(std::vector({1,1}))); - - if(test==1) - { - std::cerr << "Testing with entropy ...\n"; - b.getPropagator().allowNoise(); - }else - { - std::cerr << "Testing without entropy ...\n"; - } - - for(int i=0;i < 500000000;i++) -// for(int i=0;i < 5;i++) - { - double err=b.learnSet(p); - if(i%100000==0) - srand(time(NULL)); - if(i%200000==0 || err > targetQuality) - { - std::cerr << i << " ("<< err <<").\n"; - for(int j=0;j<4;j++) - { - std::cerr << "\t" << i%4 <<". FOR: [" << p[j%4]->operator[](0) << "," <operator[](1) << "] res: " << - q.solve(*p[j%4])[0] << "\n"; - } - } - if(err >targetQuality) - break; - } - } -} \ No newline at end of file diff --git a/tests/nn-rl-xor2.cpp b/tests/nn-rl-xor2.cpp deleted file mode 100644 index 8afd438..0000000 --- a/tests/nn-rl-xor2.cpp +++ /dev/null @@ -1,99 +0,0 @@ -#include "../src/NeuronNetwork/Learning/QLearning.h" - -#include -#include - - -class X: public Shin::NeuronNetwork::Problem -{ - public: - X(const X& a) :Problem(a) {} - X(const std::vector &a):Problem() {data=a;} -}; - -float atof(char *s) -{ - int f, m, sign, d=1; - f = m = 0; - - sign = (s[0] == '-') ? -1 : 1; - if (s[0] == '-' || s[0] == '+') s++; - - for (; *s != '.' 
&& *s; s++) { - f = (*s-'0') + f*10; - } - if (*s == '.') - for (++s; *s; s++) { - m = (*s-'0') + m*10; - d *= 10; - } - return sign*(f + (float)m/d); -} - -float AA=10; -float getQuality(X& p, int action) -{ - if((p[0]==0&& p[1]==0) ||(p[0]==1&& p[1]==1)) //should be 0 - { - return action==1?-AA:AA; - }else // should be 1 - { - return action==0?-AA:AA; - } -} - -int main(int argc, char **argv) -{ - srand(time(NULL)); - - Shin::NeuronNetwork::Learning::QLearning l(2,45,2); - if(argc==4 && argv[3][0]=='o') - { - std::cerr << "USING Optical Backpropagation\n"; - l.opticalBackPropagation(); - } - if(argc>=3) - { - std::cerr << "Setting learning coefficients to:" << atof(argv[1]) << "," << atof(argv[2]) << "\n"; - l.setLearningCoeficient(atof(argv[1]),atof(argv[2])); - } - std::vector > p1x; - - std::vector states; - states.push_back(X(std::vector({1,0}))); - states.push_back(X(std::vector({0,0}))); - states.push_back(X(std::vector({1,1}))); - states.push_back(X(std::vector({0,1}))); - - unsigned long step=0; - double quality=0; - while(step< 600000 && quality < (3.9*AA)) - { - quality=0; - if(step%10000==0) - std::cerr << "STEP " << step << "\n"; - for(unsigned i=0;i -#include - -//typedef Shin::NeuronNetwork::Problem X; - -class X: public Shin::Problem -{ - public: - X(const X& a) :Problem(a) {} - X(const std::vector &a):Problem() {for(auto q:a){ data.push_back(q);}} - protected: -}; -int main(int argc,char**) -{ - srand(time(NULL)); - std::vector s; - std::vector p; - - p.push_back(X(std::vector({0,0}))); - s.push_back(Shin::Solution(std::vector({0.4,0.3,0.2,0.1}))); - p.push_back(X(std::vector({0,0.5}))); - s.push_back(Shin::Solution(std::vector({0.6,0.3,0.2,0.5}))); - p.push_back(X(std::vector({0.4,0.5}))); - s.push_back(Shin::Solution(std::vector({0.4,0.4,0.2,0.8}))); - Shin::NeuralNetwork::FeedForward q({2,4,4,4},1.0); - Shin::NeuralNetwork::Learning::BackPropagation bp(q); - bp.setLearningCoeficient(0.2); - for(int i=0;i<3;i++) - { - Shin::Solution sp =q.solve(p[i]); - std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n"; - } - for(int i=0;i<4;i++) - { - for(int j=0;j<3;j++) - { - bp.teach(p[j],s[j]); - } - } - std::cerr << "XXXXXXXXXXXX\n"; - for(int i=0;i<3;i++) - { - Shin::Solution sp =q.solve(p[i]); - std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n"; - } -} \ No newline at end of file diff --git a/tests/recurrent.cpp b/tests/recurrent.cpp new file mode 100644 index 0000000..673e41c --- /dev/null +++ b/tests/recurrent.cpp @@ -0,0 +1,30 @@ +#include + +#include +int main() { + NeuralNetwork::Recurrent::Network a(2,1,1); +/* a.getNeurons()[3].setWeight(a.getNeurons()[2],0.00000001565598595); + a.getNeurons()[2].setWeight(a.getNeurons()[3],0.00000001565598595); + a.getNeurons()[3].setWeight(a.getNeurons()[1],0.00000001565598595); + a.getNeurons()[3].setWeight(a.getNeurons()[0],0.00000001565598595); + + a.computeOutput({0.5,0}); + + std::cout << a; + + NeuralNetwork::Recurrent::Network b(a.stringify()); +*/ + + a.getNeurons()[3].setWeight(a.getNeurons()[0],0.05); + a.getNeurons()[3].setWeight(a.getNeurons()[1],0.05); + a.getNeurons()[3].setWeight(a.getNeurons()[2],0.7); + a.getNeurons()[2].setWeight(a.getNeurons()[3],0.1); + + std::cout << a; + + for(int i=0;i<40;i++) { + std::cout << a.computeOutput({1,0.7})[0] << "\n"; + } + std::cout << a; + +} \ No newline at end of file
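
A quick sanity check of the activation-function API that the new tests above exercise. This is a minimal sketch, not part of the commit: it assumes the headers are reachable as <NeuralNetwork/...> (e.g. compiling with -I./include, matching the INPUT path switch in the Doxyfile), and that Sigmoid's constructor argument is the steepness lambda in 1/(1+exp(-lambda*x)), as the expected values in tests/activation.cpp suggest (Sigmoid(0.7) maps 0.1 to roughly 0.5175).

#include <cassert>
#include <iostream>
#include <xmmintrin.h>

#include <NeuralNetwork/ActivationFunction/Heaviside.h>
#include <NeuralNetwork/ActivationFunction/Sigmoid.h>

int main() {
    // Heaviside(lambda) fires once the input exceeds the threshold lambda.
    NeuralNetwork::ActivationFunction::Heaviside step(0.5);
    assert(step(0.4f) == 0 && step(0.6f) == 1);

    // Sigmoid(lambda): sigmoid(0) is 0.5 regardless of the steepness.
    NeuralNetwork::ActivationFunction::Sigmoid sig(0.7);
    assert(sig(0.0f) > 0.499f && sig(0.0f) < 0.501f);

    // Packed form: four inputs evaluated in one SSE register, using the
    // same union trick as tests/activation.cpp.
    union { __m128 v; float a[4]; } u;
    u.a[0] = 0.1f; u.a[1] = 10.0f; u.a[2] = -10.0f; u.a[3] = 0.0f;
    u.v = sig(u.v);
    assert(u.a[0] > 0.517f && u.a[0] < 0.518f); // matches the scalar bounds above

    std::cout << "OK" << std::endl;
    return 0;
}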