Refactored and recurrent implementation
Doxyfile | 14
@@ -437,7 +437,7 @@ WARN_LOGFILE =
 # directories like "/usr/src/myproject". Separate the files or directories
 # with spaces.
 
-INPUT = "./src/" "mainpage.dox"
+INPUT = "./include/" "mainpage.dox"
 
 # If the value of the INPUT tag contains directories, you can use the
 # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
@@ -873,18 +873,6 @@ GENERATE_XML = NO
 
 XML_OUTPUT = xml
 
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD =
-
 # If the XML_PROGRAMLISTING tag is set to YES Doxygen will
 # dump the program listings (including syntax highlighting
 # and cross-referencing information) to the XML output. Note that
Makefile | 34
@@ -7,46 +7,32 @@ all:|pre libs
 pre:
 	@mkdir -p lib
 
-libs: nn
-
 test: all
 	make -C tests
-install: |all _install
 
-_install:
+install: all
 	@mkdir -p build/lib
 	@cp lib/*.so build/lib/
 
-nn: | nn_build lib/NeuralNetwork.a lib/NeuralNetwork.so
+libs: | nn_build lib/NeuralNetwork.a lib/NeuralNetwork.so
 
-lib/NeuralNetwork.so: ./src/NeuralNetwork/NeuralNetwork.so
-	cp ./src/NeuralNetwork/NeuralNetwork.so ./lib/
+lib/NeuralNetwork.so: ./src/NeuralNetwork.so
+	cp ./src/NeuralNetwork.so ./lib/
 
-lib/NeuralNetwork.a: ./src/NeuralNetwork/NeuralNetwork.a
-	cp ./src/NeuralNetwork/NeuralNetwork.a ./lib/
-	cp ./src/NeuralNetwork/NeuralNetwork.nm ./lib/
+lib/NeuralNetwork.a: ./src/NeuralNetwork.a
+	cp ./src/NeuralNetwork.a ./lib/
+	cp ./src/NeuralNetwork.nm ./lib/
 
 nn_build:
-	@make -C src/NeuralNetwork
-
-genetics: | genetics_build lib/Genetics.a lib/Genetics.so
-
-lib/Genetics.so: ./src/Genetics/Genetics.so
-	cp ./src/Genetics/Genetics.so ./lib/
-
-lib/Genetics.a: ./src/Genetics/Genetics.a
-	cp ./src/Genetics/Genetics.a ./lib/
-	cp ./src/Genetics/Genetics.nm ./lib/
-
-genetics_build:
-	@make -C src/Genetics
+	@make -C src/
 
 documentation:
 	doxygen
 
 clean:
-	@make -C src/NeuralNetwork clean
+	@make -C src clean
 	@make -C tests clean
 	#@rm -f ./*.so ./*.a ./*.nm
 	@rm -f ./lib/*.so ./lib/*.a ./lib/*.nm
 
 	@echo "Cleaned....."
Makefile.const
@@ -4,9 +4,9 @@ CXXFLAGS+= -std=c++14
 #-fprefetch-loop-arrays
-CXXFLAGS+= -pg -fPIC
-CXXFLAGS+= -g
+CXXFLAGS+= -fPIC -pthread
 #CXXFLAGS+= -DUSE_SSE2
 OPTIMALIZATION = -O3 -march=native -mtune=native
 
 %.o : %.cpp %.h
 	$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -c $< -o $@
include/NeuralNetwork/ActivationFunction/ActivationFunction.h | 42 (new file)
@@ -0,0 +1,42 @@
+#pragma once
+
+#include <string>
+
+namespace NeuralNetwork {
+namespace ActivationFunction {
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Abstract class of activation function
+	 */
+	class ActivationFunction {
+	public:
+
+		virtual ~ActivationFunction() {}
+
+		/**
+		 * @brief Returns derivation of output
+		 * @param input is input of function
+		 * @param output is output of function
+		 */
+		virtual float derivatedOutput(const float &input,const float &output)=0;
+
+		/**
+		 * @brief Returns value of output
+		 * @param x is input of function
+		 */
+		virtual float operator()(const float &x)=0;
+
+		/**
+		 * @brief Function returns clone of object
+		 */
+		virtual ActivationFunction* clone() const = 0;
+
+		/**
+		 * @brief This is a virtual function for storing Activation function
+		 * @returns json describing function
+		 */
+		virtual std::string stringify() const =0;
+	};
+}
+}
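For orientation, a concrete activation only has to fill in four members: the forward value, the derivative expressed through input/output, a polymorphic clone(), and a JSON stringify(). The Identity class below is a hypothetical sketch against the header above, not part of this commit:

#include <string>
#include "ActivationFunction.h"

namespace NeuralNetwork {
namespace ActivationFunction {
	// Hypothetical example: f(x) = x, so the derivative is a constant 1.
	class Identity : public ActivationFunction {
	public:
		float derivatedOutput(const float &, const float &) override { return 1.0f; }
		float operator()(const float &x) override { return x; }
		ActivationFunction* clone() const override { return new Identity(); }
		std::string stringify() const override { return "{ \"class\": \"Identity\" }"; }
	};
}
}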
include/NeuralNetwork/ActivationFunction/Heaviside.h | 26 (new file)
@@ -0,0 +1,26 @@
+#pragma once
+
+#include "./ActivationFunction.h"
+
+namespace NeuralNetwork {
+namespace ActivationFunction {
+
+	class Heaviside: public ActivationFunction {
+	public:
+		Heaviside(const float &lambdaP=1.0): lambda(lambdaP) {}
+		inline virtual float derivatedOutput(const float &,const float &) override { return 1.0; }
+		inline virtual float operator()(const float &x) override { return x>lambda ? 1.0f : 0.0f; };
+
+		virtual ActivationFunction* clone() const override {
+			return new Heaviside(lambda);
+		}
+
+		virtual std::string stringify() const override {
+			return "{ \"class\": \"NeuralNetwork::ActivationFunction::Heaviside\", \"lamba\" : "+std::to_string(lambda)+"}";
+		}
+
+	protected:
+		float lambda;
+	};
+}
+}
include/NeuralNetwork/ActivationFunction/HyperbolicTangent.h | 27 (new file)
@@ -0,0 +1,27 @@
+#pragma once
+
+#include "./ActivationFunction.h"
+
+#include <cmath>
+
+namespace NeuralNetwork {
+namespace ActivationFunction {
+
+	class HyperbolicTangent: public ActivationFunction {
+	public:
+		HyperbolicTangent(const float& lam=1):lambda(lam) {}
+		inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*(1-output*output); }
+		inline virtual float operator()(const float &x) override { return tanh(lambda*x); };
+		virtual ActivationFunction* clone() const override {
+			return new HyperbolicTangent(lambda);
+		}
+
+		virtual std::string stringify() const override {
+			return "{ \"class\": \"NeuralNetwork::ActivationFunction::HyperbolicTangent\", \"lamba\" : "+std::to_string(lambda)+"}";
+		}
+
+	protected:
+		float lambda;
+	};
+}
+}
include/NeuralNetwork/ActivationFunction/Sigmoid.h | 35 (new file)
@@ -0,0 +1,35 @@
+#pragma once
+
+#include <cmath>
+
+#include "./StreamingActivationFunction.h"
+#include "../../sse_mathfun.h"
+
+namespace NeuralNetwork {
+namespace ActivationFunction {
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Class for computing sigmoid
+	 */
+	class Sigmoid: public StreamingActivationFunction {
+	public:
+		Sigmoid(const float lambdaP = -0.5): lambda(lambdaP) {}
+		inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*output*(1.0f-output); }
+		inline virtual float operator()(const float &x) override { return 1.0f / (1.0f +exp(lambda*x) ); };
+		inline virtual __m128 operator()(const __m128 &x) override {
+			// exp_ps is extremely slow!
+			return _mm_div_ps(_mm_set1_ps(1.0),_mm_add_ps(exp_ps(_mm_mul_ps(_mm_set1_ps(lambda),x)),_mm_set1_ps(1.0)));
+		}
+		virtual ActivationFunction* clone() const override {
+			return new Sigmoid(lambda);
+		}
+
+		virtual std::string stringify() const override {
+			return "{ \"class\": \"NeuralNetwork::ActivationFunction::Sigmoid\", \"lamba\" : "+std::to_string(lambda)+"}";
+		}
+	protected:
+		float lambda;
+	};
+}
+}
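A minimal usage sketch for the two call paths above, assuming an SSE-capable build and that sse_mathfun.h's exp_ps is on the include path: the scalar operator() evaluates one input, while the __m128 overload evaluates four independent lanes per call.

#include <cstdio>
#include <xmmintrin.h>
#include <NeuralNetwork/ActivationFunction/Sigmoid.h>

int main() {
	NeuralNetwork::ActivationFunction::Sigmoid s(-4.9f); // lambda as used by the recurrent neurons

	float scalar = s(0.5f); // scalar path

	// SSE path: four sigmoids at once. _mm_set_ps stores its arguments
	// in reverse lane order, so f[1] holds the 0.5f input.
	union { __m128 sse; float f[4]; } v;
	v.sse = s(_mm_set_ps(2.0f, 1.0f, 0.5f, 0.0f));
	std::printf("scalar=%f, same lane via SSE=%f\n", scalar, v.f[1]);
	return 0;
}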
include/NeuralNetwork/ActivationFunction/StreamingActivationFunction.h (new file)
@@ -0,0 +1,26 @@
+#pragma once
+
+#include <xmmintrin.h>
+
+#include "./ActivationFunction.h"
+
+namespace NeuralNetwork {
+namespace ActivationFunction {
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Abstract class of activation function with support of SSE
+	 */
+	class StreamingActivationFunction : public ActivationFunction {
+	public:
+		virtual float derivatedOutput(const float &input,const float &output)=0;
+		virtual float operator()(const float &x)=0;
+
+		/**
+		 * @brief Returns value of four outputs
+		 * @param x is a packed float[4]; each lane holds one input value
+		 */
+		virtual __m128 operator()(const __m128 &x)=0;
+	};
+}
+}
include/NeuralNetwork/BasisFunction/BasisFunction.h | 27 (new file)
@@ -0,0 +1,27 @@
+#pragma once
+
+#include <math.h>
+#include <vector>
+
+#include <string>
+
+namespace NeuralNetwork {
+namespace BasisFunction {
+	class BasisFunction {
+	public:
+		virtual ~BasisFunction() {}
+		virtual float operator()(const std::vector<float>& weights, const std::vector<float>& input)=0;
+
+		/**
+		 * @brief Function returns clone of object
+		 */
+		virtual BasisFunction* clone() const = 0;
+
+		/**
+		 * @brief This is a virtual function for storing Basis function
+		 * @returns json describing function
+		 */
+		virtual std::string stringify() const =0;
+	};
+}
+}
include/NeuralNetwork/BasisFunction/Linear.h | 68 (new file)
@@ -0,0 +1,68 @@
+#pragma once
+
+#include <mmintrin.h>
+#include <xmmintrin.h>
+#include <emmintrin.h>
+#include <xmmintrin.h>
+#include <pmmintrin.h>
+
+#include "./StreamingBasisFunction.h"
+
+#include "../../sse_mathfun.h"
+
+namespace NeuralNetwork {
+namespace BasisFunction {
+
+	class Linear: public StreamingBasisFunction {
+	public:
+		Linear() {}
+
+		inline virtual float computeStreaming(const std::vector<float>& weights, const std::vector<float>& input) override {
+			size_t inputSize=input.size();
+			size_t alignedPrev=inputSize-inputSize%4;
+
+			const float* weightsData=weights.data();
+			const float* inputData=input.data();
+			vec4f partialSolution;
+			partialSolution.sse =_mm_setzero_ps();
+
+			//TODO prefetch ??
+			for(register size_t k=0;k<alignedPrev;k+=4) {
+				partialSolution.sse=_mm_add_ps(partialSolution.sse,_mm_mul_ps(_mm_load_ps(weightsData+k),_mm_load_ps(inputData+k)));
+			}
+
+			for(register size_t k=alignedPrev;k<inputSize;k++) {
+				partialSolution.sse=_mm_add_ps(partialSolution.sse,_mm_mul_ps(_mm_load_ss(weightsData+k),_mm_load_ss(inputData+k)));
+			}
+
+#ifdef USE_SSE2 //pre-SSE3 solution
+			partialSolution.sse= _mm_add_ps(_mm_movehl_ps(partialSolution.sse, partialSolution.sse), partialSolution.sse);
+			partialSolution.sse=_mm_add_ss(partialSolution.sse, _mm_shuffle_ps(partialSolution.sse,partialSolution.sse, 1));
+#else
+			partialSolution.sse = _mm_hadd_ps(partialSolution.sse, partialSolution.sse);
+			partialSolution.sse = _mm_hadd_ps(partialSolution.sse, partialSolution.sse);
+#endif
+
+			return partialSolution.f[0];
+		}
+
+		inline virtual float compute(const std::vector<float>& weights, const std::vector<float>& input) override {
+			register float tmp = 0;
+			size_t inputSize=input.size();
+			for(size_t k=0;k<inputSize;k++) {
+				tmp+=input[k]*weights[k];
+			}
+			return tmp;
+		}
+
+		virtual BasisFunction* clone() const override {
+			return new Linear();
+		}
+
+		virtual std::string stringify() const override {
+			return "{ \"class\": \"NeuralNetwork::BasisFunction::Linear\" }";
+		}
+
+	};
+}
+}
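The two entry points above trade portability for speed: compute() is the plain weighted sum, while computeStreaming() processes four elements per step with SSE and folds the partial sums with a horizontal add. A usage sketch follows; note that _mm_load_ps expects 16-byte-aligned data, which std::vector does not guarantee, so the streaming call assumes suitably aligned storage.

#include <cstdio>
#include <vector>
#include <NeuralNetwork/BasisFunction/Linear.h>

int main() {
	NeuralNetwork::BasisFunction::Linear dot;
	std::vector<float> w = {0.5f, -1.0f, 2.0f, 0.25f, 1.0f};
	std::vector<float> x = {1.0f,  2.0f, 3.0f, 4.0f,  5.0f};

	float plain = dot.compute(w, x);          // 0.5 - 2 + 6 + 1 + 5 = 10.5
	float fast  = dot.computeStreaming(w, x); // SSE main loop + scalar tail

	std::printf("compute=%f computeStreaming=%f\n", plain, fast);
	return 0;
}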
include/NeuralNetwork/BasisFunction/Radial.h
@@ -11,6 +11,14 @@ namespace BasisFunction
 	{
 		public:
 			Radial() {}
+
+			virtual BasisFunction* clone() const override {
+				return new Radial();
+			}
+
+			virtual std::string stringify() const override {
+				return "{ \"class\": \"NeuralNetwork::BasisFunction::Radial\" }";
+			}
 	};
 }
 }
include/NeuralNetwork/BasisFunction/StreamingBasisFunction.h | 23 (new file)
@@ -0,0 +1,23 @@
+#pragma once
+
+#include <xmmintrin.h>
+
+#include "./BasisFunction.h"
+
+namespace NeuralNetwork {
+namespace BasisFunction {
+	class StreamingBasisFunction : public BasisFunction {
+	public:
+		union vec4f{
+			__m128 sse;
+			float f[4];
+		};
+
+		virtual float operator()(const std::vector<float>& weights, const std::vector<float>& input) override {
+			return computeStreaming(weights,input);
+		}
+		virtual float computeStreaming(const std::vector<float>& weights, const std::vector<float>& input) =0;
+		virtual float compute(const std::vector<float>& weights, const std::vector<float>& input) =0;
+	};
+}
+}
include/NeuralNetwork/Layer.h | 33 (new file)
@@ -0,0 +1,33 @@
+#pragma once
+
+#include <cstddef>
+#include <vector>
+
+#include "Neuron.h"
+
+namespace NeuralNetwork
+{
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Abstract class for all Layers of neurons
+	 */
+	class Layer
+	{
+		public:
+
+			virtual ~Layer() {};
+
+			/**
+			 * @brief This is a virtual function for selecting neuron
+			 * @param neuron is position in layer
+			 * @returns Specific neuron
+			 */
+
+			virtual Neuron& operator[](const size_t& neuron)=0;
+			/**
+			 * @returns Size of layer
+			 */
+			virtual size_t size() const=0;
+	};
+
+}
include/NeuralNetwork/Network.h | 55 (new file)
@@ -0,0 +1,55 @@
+#pragma once
+
+#include <cstddef>
+#include <vector>
+
+#include "Neuron.h"
+
+#include "Stringifiable.h"
+
+#include <ostream>
+#include <sstream>
+
+namespace NeuralNetwork
+{
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Abstract model of simple Network
+	 */
+	class Network : public Stringifiable
+	{
+		public:
+			/**
+			 * @brief Constructor for Network
+			 */
+			inline Network() {};
+
+			/**
+			 * @brief Virtual destructor for Network
+			 */
+			virtual ~Network() {};
+
+			/**
+			 * @brief This is a virtual function for all networks
+			 * @param input is input of network
+			 * @returns output of network
+			 */
+			virtual std::vector<float> computeOutput(const std::vector<float>& input)=0;
+
+			/**
+			 * @param t is number of threads, if set to 0 or 1 then threading is disabled
+			 * @brief Enables or disables Threaded computing of ANN
+			 */
+
+			inline virtual void setThreads(const unsigned& t) final {threads=t;}
+
+			using Stringifiable::stringify;
+
+		protected:
+			/**
+			 * @brief Number of threads used by network
+			 */
+			unsigned threads=1;
+	};
+}
include/NeuralNetwork/Neuron.h
@@ -1,7 +1,6 @@
-#ifndef _S_NN_NEURON_H_
-#define _S_NN_NEURON_H_
+#pragma once
 
 #include <cstddef>
+#include <string>
 
 namespace NeuralNetwork
 {
@@ -12,19 +11,35 @@ namespace NeuralNetwork
 	class Neuron
 	{
 		public:
 
+			/**
+			 * @brief returns unique id for neuron
+			 */
+			virtual unsigned long id() const =0;
+
 			/**
 			 * @brief virtual destructor for Neuron
 			 */
 			virtual ~Neuron() {};
 
-			virtual float getWeight(const int &w) const =0;
+			/**
+			 * @brief This is a virtual function for storing network
+			 * @returns json describing network and its state
+			 */
+			virtual std::string stringify(const std::string &prefix="") const =0;
+
+			/**
+			 * @brief Gets weight
+			 * @param n is neuron
+			 */
+			virtual float getWeight(const Neuron &n) const =0;
 
			/**
			 * @brief Sets weight
-			 * @param i is number of neuron
-			 * @param p is new weight for input neuron i
+			 * @param n is neuron
+			 * @param w is new weight for input neuron n
			 */
-			virtual void setWeight(const int& i ,const float &p) =0;
+			virtual void setWeight(const Neuron& n ,const float &w) =0;
 
			/**
			 * @brief Returns output of neuron
@@ -34,16 +49,16 @@ namespace NeuralNetwork
 			/**
 			 * @brief Returns input of neuron
 			 */
-			virtual float input() const=0;
+			virtual float value() const=0;
 
			/**
			 * @brief Returns value for derivation of activation function
			 */
-			virtual float derivatedOutput() const=0;
+			// virtual float derivatedOutput() const=0;
 
			/**
			 * @brief Function sets bias for neuron
-			 * @param biad is new bias (initial value for neuron)
+			 * @param bias is new bias (initial value for neuron)
			 */
			virtual void setBias(const float &bias)=0;
 
@@ -53,5 +68,4 @@ namespace NeuralNetwork
 			virtual float getBias() const=0;
 		protected:
 	};
 }
-#endif
include/NeuralNetwork/Recurrent/Network.h | 88 (new file)
@@ -0,0 +1,88 @@
+#pragma once
+
+#include "../Network.h"
+#include "Neuron.h"
+
+#include <vector>
+
+#include <sstream>
+#include <iomanip>
+#include <limits>
+
+namespace NeuralNetwork {
+namespace Recurrent {
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Recurrent model of artificial neural network
+	 */
+	class Network: public NeuralNetwork::Network {
+	public:
+
+		/**
+		 * @brief Constructor for Network
+		 * @param _inputSize is number of inputs to network
+		 * @param _outputSize is size of output from network
+		 * @param hiddenUnits is number of hiddenUnits to be created
+		 */
+		inline Network(size_t _inputSize, size_t _outputSize,size_t hiddenUnits=0):NeuralNetwork::Network(),inputSize(_inputSize),outputSize(_outputSize), neurons(0) {
+			for(size_t i=0;i<_inputSize+_outputSize;i++) {
+				addNeuron();
+			}
+
+			for(size_t i=0;i<hiddenUnits;i++) {
+				addNeuron();
+			}
+		};
+
+		// todo: implement
+		inline Network(const std::string &json) {
+
+		}
+		/**
+		 * @brief Virtual destructor for Network
+		 */
+		virtual ~Network() {};
+
+		/**
+		 * @brief This is a function to compute one iteration of network
+		 * @param input is input of network
+		 * @returns output of network
+		 */
+		inline virtual std::vector<float> computeOutput(const std::vector<float>& input) override {
+			return computeOutput(input,1);
+		}
+
+		/**
+		 * @brief This is a function to compute iterations of network
+		 * @param input is input of network
+		 * @param iterations is number of iterations
+		 * @returns output of network
+		 */
+		std::vector<float> computeOutput(const std::vector<float>& input, unsigned int iterations);
+
+		std::vector<Neuron>& getNeurons () {
+			return neurons;
+		}
+
+		using NeuralNetwork::Network::stringify;
+
+		void stringify(std::ostream& out) const override;
+
+		Neuron& addNeuron() {
+			neurons.push_back(Recurrent::Neuron(neurons.size()));
+			Neuron &newNeuron=neurons.back();
+			for(size_t i=0;i<neurons.size();i++) {
+				neurons[i].setWeight(newNeuron,0.0);
+			}
+			return newNeuron;
+		}
+
+	protected:
+		size_t inputSize=0;
+		size_t outputSize=0;
+
+		std::vector<Recurrent::Neuron> neurons;
+	};
+}
+}
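Taken together with Neuron.h below, the recurrent network is built neuron by neuron and fully connected with zero weights, so a caller wires it up through setWeight() and reads the output units after some number of iterations. A hypothetical usage sketch, with illustrative weight values and topology:

#include <iostream>
#include <vector>
#include <NeuralNetwork/Recurrent/Network.h>

int main() {
	// 2 inputs, 1 output, 3 hidden units; neurons 0-1 are the inputs
	// and neuron 2 is the output unit.
	NeuralNetwork::Recurrent::Network net(2, 1, 3);

	auto &neurons = net.getNeurons();
	neurons[2].setWeight(neurons[0], 1.5f); // input 0 -> output, weight 1.5
	neurons[2].setBias(0.1f);

	// Run five synchronous update steps before reading the output.
	std::vector<float> out = net.computeOutput({1.0f, 0.0f}, 5);
	std::cout << out[0] << "\n";

	std::cout << net << std::endl; // JSON dump via Stringifiable
	return 0;
}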
include/NeuralNetwork/Recurrent/Neuron.h | 123 (new file)
@@ -0,0 +1,123 @@
+#pragma once
+
+#include "../Neuron.h"
+#include <NeuralNetwork/ActivationFunction/Sigmoid.h>
+#include <NeuralNetwork/BasisFunction/Linear.h>
+#include <vector>
+
+
+#include <sstream>
+#include <iomanip>
+#include <limits>
+
+namespace NeuralNetwork {
+namespace Recurrent {
+
+	class Network;
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Class of recurrent neuron.
+	 */
+	class Neuron : public NeuralNetwork::Neuron
+	{
+	public:
+		Neuron(unsigned long _id=0,const float& _bias = 0): NeuralNetwork::Neuron(), basis(new BasisFunction::Linear),
+						activation(new ActivationFunction::Sigmoid(-4.9)),
+						id_(_id),bias(_bias),weights(_id+1),_output(0),_value(0) {
+		}
+
+		Neuron(const Neuron &r): NeuralNetwork::Neuron(), basis(r.basis->clone()), activation(r.activation->clone()),id_(r.id_),
+						bias(r.bias), weights(r.weights), _output(r._output), _value(r._value) {
+		}
+		virtual ~Neuron() {
+			delete basis;
+			delete activation;
+		};
+
+		virtual std::string stringify(const std::string &prefix="") const override;
+
+		Recurrent::Neuron& operator=(const NeuralNetwork::Recurrent::Neuron&r) {
+			id_=r.id_;
+			bias=r.bias;
+			weights=r.weights;
+			basis=r.basis->clone();
+			activation=r.activation->clone();
+			return *this;
+		}
+
+		virtual long unsigned int id() const override {
+			return id_;
+		};
+
+		/**
+		 * @brief Gets weight
+		 * @param n is neuron
+		 */
+		virtual float getWeight(const NeuralNetwork::Neuron &n) const override {
+			return weights[n.id()];
+		}
+
+		/**
+		 * @brief Sets weight
+		 * @param n is neuron
+		 * @param w is new weight for input neuron n
+		 */
+		virtual void setWeight(const NeuralNetwork::Neuron& n ,const float &w) override {
+			if(weights.size()<n.id()+1) {
+				weights.resize(n.id()+1);
+			}
+			weights[n.id()]=w;
+		}
+
+		/**
+		 * @brief Returns output of neuron
+		 */
+		virtual float output() const override {
+			return _output;
+		}
+
+		/**
+		 * @brief Returns input of neuron
+		 */
+		virtual float value() const override {
+			return _value;
+		}
+
+		/**
+		 * @brief Function sets bias for neuron
+		 * @param _bias is new bias (initial value for neuron)
+		 */
+		virtual void setBias(const float &_bias) override {
+			bias=_bias;
+		}
+
+		/**
+		 * @brief Function returns bias for neuron
+		 */
+		virtual float getBias() const override {
+			return bias;
+		}
+
+		float operator()(const std::vector<float>& inputs) {
+			//compute value
+			_value=basis->operator()(weights,inputs)+bias;
+
+			//compute output
+			_output=activation->operator()(_value);
+
+			return _output;
+		}
+
+	protected:
+		BasisFunction::BasisFunction *basis;
+		ActivationFunction::ActivationFunction *activation;
+
+		unsigned long id_;
+		float bias;
+		std::vector<float> weights;
+		float _output;
+		float _value;
+	};
+}
+}
include/NeuralNetwork/Stringifiable.h | 29 (new file)
@@ -0,0 +1,29 @@
+#pragma once
+
+#include <sstream>
+
+namespace NeuralNetwork {
+	class Stringifiable {
+	public:
+
+		virtual ~Stringifiable() {
+		}
+
+		/**
+		 * @brief This is a virtual function for class
+		 */
+		virtual void stringify(std::ostream& out) const =0;
+
+		virtual std::string stringify() final {
+			std::ostringstream s;
+			stringify(s);
+			return s.str();
+		}
+
+	};
+
+	static std::ostream& operator<<(std::ostream& o, const Stringifiable& n) {
+		n.stringify(o);
+		return o;
+	}
+}
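Anything deriving from Stringifiable only implements the ostream overload; the string-returning overload and operator<< come for free. A small hypothetical example, not part of this commit (the using-declaration mirrors the one in Network.h and un-hides the inherited overload):

#include <iostream>
#include <string>
#include <NeuralNetwork/Stringifiable.h>

class Tag : public NeuralNetwork::Stringifiable {
public:
	using NeuralNetwork::Stringifiable::stringify; // un-hide the string overload
	void stringify(std::ostream &out) const override {
		out << "{ \"class\": \"Tag\" }";
	}
};

int main() {
	Tag t;
	std::cout << t << "\n";        // namespace-level operator<< found via ADL
	std::string s = t.stringify(); // final overload collects into a string
	std::cout << s << "\n";
	return 0;
}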
include/Tools/Array.h | 80 (new file)
@@ -0,0 +1,80 @@
+#pragma once
+
+namespace Array {
+
+	/**
+	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
+	 * @brief Template of array for simple usage
+	 */
+
+	template <typename T>
+	class Array {
+	public:
+
+		Array(unsigned long size=0):arr(size==0? nullptr: new T[size]),_size(0) {
+
+		}
+
+		Array (const Array& r):arr(r.arr),_size(r._size) {
+		}
+
+		~Array() {
+		}
+
+		inline Array& operator=(const Array& r) {
+			arr=r.arr;
+			_size=r._size;
+		}
+
+		inline void resize(unsigned long size) {
+			if(arr==nullptr) {
+				arr=new T[size];
+				_size=size;
+			}else {
+				T* tmp=new T[size];
+				for(unsigned long i=0;i<_size;i++) {
+					tmp[i]=arr[i];
+				}
+				delete[] arr;
+				arr=tmp;
+			}
+		}
+
+		inline void free() {
+			delete[] arr;
+			arr=nullptr;
+			_size=0;
+		}
+
+		inline const T& operator[](unsigned long i) const {
+			return arr[i];
+		}
+
+		inline T& operator[](unsigned long i) {
+			return arr[i];
+		}
+
+		unsigned long size() const {
+			return _size;
+		}
+
+	protected:
+		T* arr;
+		unsigned long _size;
+	private:
+	};
+
+	template <typename T>
+	class DynamicArray: public Array<T> {
+	public:
+		DynamicArray(unsigned long size=0,float _scaleFactor=1):Array<T>(size),scaleFactor(_scaleFactor) {
+
+		}
+
+
+
+	protected:
+		float scaleFactor;
+	private:
+	};
+}
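A usage sketch for the container above, following the semantics as committed: the sizing constructor allocates but leaves _size at 0, resize() records the new size only on the first (nullptr) allocation, copies are shallow, and the destructor is empty, so storage is released explicitly via free().

#include <cstdio>
#include <Tools/Array.h>

int main() {
	Array::Array<int> a; // start empty, then resize to get a usable size()
	a.resize(4);
	for (unsigned long i = 0; i < a.size(); i++) {
		a[i] = static_cast<int>(i * i);
	}
	std::printf("a[3]=%d size=%lu\n", a[3], a.size());

	a.free(); // must be called exactly once; copies share the same pointer
	return 0;
}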
mainpage.dox
@@ -6,6 +6,4 @@
 	@author Tomas Cernik (Tom.Cernik@gmail.com)
 
-
-	TODO
 
 */
src/Makefile | 32
@@ -1,8 +1,32 @@
-OBJFILES= sse_mathfun.o
 
 include ../Makefile.const
 
-all: $(OBJFILES)
+OBJFILES= ./sse_mathfun.o ./NeuralNetwork/Recurrent/Network.o ./NeuralNetwork/Recurrent/Neuron.o
+
+#LayerNetwork.o\
+# Learning/Learning.o Learning/BackPropagation.o ../sse_mathfun.o
+
+LINKFILES=
+
+LIBNAME=NeuralNetwork
+
+all: lib
+
+spec:=../include/
+
+
+%.o : %.cpp $(patsubst ./%.o,../include/%.h,$<)
+	$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -I../include -c $< -o $@
+
+lib: $(LIBNAME).so $(LIBNAME).a
+
+$(LIBNAME).so: $(OBJFILES)
+	$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so
+
+$(LIBNAME).a: $(OBJFILES)
+	rm -f $(LIBNAME).a # create new library
+	ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
+	ranlib $(LIBNAME).a
+	nm --demangle $(LIBNAME).a > $(LIBNAME).nm
+
 clean:
 	@rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o
src/NeuralNetwork/ActivationFunction/ActivationFunction.h (deleted)
@@ -1,19 +0,0 @@
-#ifndef __TRAN_FUN_H_
-#define __TRAN_FUN_H_
-
-#include <math.h>
-
-namespace NeuralNetwork
-{
-	namespace ActivationFunction
-	{
-		class ActivationFunction
-		{
-			public:
-				virtual ~ActivationFunction() {}
-				virtual float derivatedOutput(const float &input,const float &output)=0;
-				virtual float operator()(const float &x)=0;
-		};
-	}
-}
-#endif
src/NeuralNetwork/ActivationFunction/Heaviside.h (deleted)
@@ -1,22 +0,0 @@
-#ifndef __TRAN_HEAVISIDE_H_
-#define __TRAN_HEAVISIDE_H_
-
-#include "./ActivationFunction.h"
-
-namespace NeuralNetwork
-{
-	namespace ActivationFunction
-	{
-		class Heaviside: public ActivationFunction
-		{
-			public:
-				Sigmoid(const float &lambdaP): lambda(lambdaP) {}
-				inline virtual float derivatedOutput(const float &input,const float &output) override { return 1.0; }
-				inline virtual float operator()(const float &x) override { return x>k ? 1.0f : 0.0f; };
-			protected:
-				float lambda;
-
-		};
-	}
-}
-#endif
src/NeuralNetwork/ActivationFunction/HyperbolicTangent.h (deleted)
@@ -1,21 +0,0 @@
-#ifndef __TRAN_HYPTAN_H_
-#define __TRAN_HYPTAN_H_
-
-#include "./ActivationFunction.h"
-
-namespace NeuralNetwork
-{
-	namespace ActivationFunction
-	{
-		class HyperbolicTangent: public ActivationFunction
-		{
-			public:
-				HyperbolicTangent(const float& lam=1):lambda(lam) {}
-				inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*(1-output*output); }
-				inline virtual float operator()(const float &x) override { return tanh(lambda*x); };
-			protected:
-				float lambda;
-		};
-	}
-}
-#endif
src/NeuralNetwork/ActivationFunction/Sigmoid.h (deleted)
@@ -1,30 +0,0 @@
-#ifndef __TRAN_SIGMOID_H_
-#define __TRAN_SIGMOID_H_
-
-#include "./StreamingActivationFunction.h"
-
-namespace NeuralNetwork
-{
-	namespace ActivationFunction
-	{
-		class Sigmoid: public StreamingActivationFunction
-		{
-			public:
-				Sigmoid(const float lambdaP = 0.8): lambda(lambdaP) {}
-				inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*output*(1.0f-output); }
-				inline virtual float operator()(const float &x) override { return 1.0f / (1.0f +exp(-lambda*x) ); };
-				inline virtual __m128 operator()(__m128 x) override {
-					x=_mm_mul_ps(temporaryConstLambda,x); //-lambda*sol[k]
-					x=exp_ps(x); //exp(x)
-					x= _mm_add_ps(x,temporaryConst1); //1+exp()
-					x= _mm_div_ps(temporaryConst1,x);//1/....
-					return x;
-				}
-			protected:
-				float lambda;
-				__m128 temporaryConst1=_mm_set1_ps(1.0);
-				__m128 temporaryConstLambda=_mm_set1_ps(-lambda);
-		};
-	}
-}
-#endif
src/NeuralNetwork/ActivationFunction/StreamingActivationFunction.h (deleted)
@@ -1,23 +0,0 @@
-#ifndef __STREAMINGTRAN_FUN_H_
-#define __STREAMINGTRAN_FUN_H_
-
-#include <xmmintrin.h>
-
-#include "../../sse_mathfun.h"
-
-#include "./ActivationFunction.h"
-
-namespace NeuralNetwork
-{
-	namespace ActivationFunction
-	{
-		class StreamingActivationFunction : public ActivationFunction
-		{
-			public:
-				virtual float derivatedOutput(const float &input,const float &output)=0;
-				virtual float operator()(const float &x)=0;
-				virtual __m128 operator()(__m128)=0; // it must be overriden to be used!
-		};
-	}
-}
-#endif
src/NeuralNetwork/BasisFunction/BasisFunction.h (deleted)
@@ -1,18 +0,0 @@
-#ifndef _BASIS_FUN_H_
-#define _BASIS_FUN_H_
-
-#include <math.h>
-
-namespace NeuralNetwork
-{
-	namespace BasisFunction
-	{
-		class BasisFunction
-		{
-			public:
-				virtual ~BasisFunction() {}
-				virtual float operator()(const size_t &inputSize, const float* weights, const float* input)=0;
-		};
-	}
-}
-#endif
src/NeuralNetwork/BasisFunction/FeedForward.h (deleted)
@@ -1,63 +0,0 @@
-#ifndef __BASIS_FEEDFORWARD_H_
-#define __BASIS_FEEDFORWARD_H_
-
-#include "./StreamingBasisFunction.h"
-
-#include <mmintrin.h>
-#include <xmmintrin.h>
-#include <emmintrin.h>
-#include <xmmintrin.h>
-#include <pmmintrin.h>
-
-namespace NeuralNetwork
-{
-	namespace BasisFunction
-	{
-		class FeedForward: public StreamingBasisFunction
-		{
-			public:
-				FeedForward() {}
-
-				inline virtual __m128 operator()(const size_t& inputSize, const float* weights, const float* input, const size_t& alignedPrev)
-				{
-					__m128 partialSolution= _mm_setzero_ps();
-					__m128 w=_mm_setzero_ps();
-					__m128 sols;
-					for(register size_t k=alignedPrev;k<inputSize;k++)
-					{
-						w = _mm_load_ss(weights+k);
-						sols = _mm_load_ss(input+k);
-						w=_mm_mul_ps(w,sols);
-						partialSolution=_mm_add_ps(partialSolution,w);
-					}
-					for(register size_t k=0;k<alignedPrev;k+=sizeof(float)) // TODO ??? sizeof(float)
-					{
-						w = _mm_load_ps(weights+k);
-						sols = _mm_load_ps(input+k);
-						w=_mm_mul_ps(w,sols);
-						partialSolution=_mm_add_ps(partialSolution,w);
-					}
-#ifdef USE_SSE2 //pre-SSE3 solution
-					partialSolution= _mm_add_ps(_mm_movehl_ps(partialSolution, partialSolution), partialSolution);
-					partialSolution=_mm_add_ss(partialSolution, _mm_shuffle_ps(partialSolution,partialSolution, 1));
-#else
-					partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
-					partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
-#endif
-					return partialSolution;
-				}
-
-				inline virtual float operator()(const size_t &inputSize, const float* weights, const float* input)
-				{
-					register float tmp = 0;
-					for(register size_t k=0;k<inputSize;k++)
-					{
-						tmp+=input[k]*weights[k];
-					}
-					return tmp;
-				}
-
-		};
-	}
-}
-#endif
src/NeuralNetwork/BasisFunction/StreamingBasisFunction.h (deleted)
@@ -1,22 +0,0 @@
-#ifndef __STREAMINGBASIS_FUN_H_
-#define __STREAMINGBASIS_FUN_H_
-
-#include <xmmintrin.h>
-
-#include "../../sse_mathfun.h"
-
-#include "./BasisFunction.h"
-
-namespace NeuralNetwork
-{
-	namespace BasisFunction
-	{
-		class StreamingBasisFunction : public BasisFunction
-		{
-			public:
-				virtual float operator()(const size_t &inputSize, const float* weights, const float* input) = 0;
-				virtual __m128 operator()(const size_t& inputSize, const float* weights, const float* input, const size_t& alignedPrev) =0;
-		};
-	}
-}
-#endif
src/NeuralNetwork/FeedForward.h (deleted)
@@ -1,18 +0,0 @@
-#ifndef _S_NN_FF_H_
-#define _S_NN_FF_H_
-
-#include "LayerNetwork.h"
-
-namespace NeuralNetwork
-{
-	class FeedForward : public LayerNetwork
-	{
-		public:
-			FeedForward(std::initializer_list<size_t> s, double lam=NeuralNetwork::lambda,
-				LayerNetworkInitializer weightInit=
-					[](const size_t&, const size_t &, const size_t &)->float{ return 1.0-((float)(rand()%2001))/1000.0;}
-			) : LayerNetwork(s,lam,weightInit) {}
-
-	};
-}
-#endif
src/NeuralNetwork/LayerNetwork.cpp (deleted)
@@ -1,201 +0,0 @@
-#include "LayerNetwork.h"
-
-using namespace NeuralNetwork;
-
-LayerNetworkLayer::~LayerNetworkLayer()
-{
-	if(neurons!=nullptr)
-	{
-		for(size_t i=0;i<layerSize-1;i++)
-		{
-			delete neurons[i];
-		}
-		delete[] neurons;
-	}
-}
-
-LayerNetworkNeuron& LayerNetworkLayer::operator[](const size_t& neuron)
-{
-	if(neurons==nullptr)
-	{
-		neurons=new LayerNetworkNeuron*[layerSize-1];
-		for(size_t i=1;i<layerSize;i++)
-		{
-			neurons[i-1]=new LayerNetworkNeuron(weights[i],outputs[i],inputs[i],lambda,function);
-		}
-	}
-
-	if(neuron>=layerSize)
-		throw std::out_of_range("Not so many neurons in layers.");
-
-	return *neurons[neuron];
-
-}
-
-LayerNetwork::LayerNetwork(std::initializer_list<size_t> s, double lam, LayerNetworkInitializer weightInit): Network(),layers(s.size())
-{
-	transfer = new ActivationFunction::ActivationFunction*[s.size()];
-	weights= new float**[s.size()];
-	layerSizes= new size_t[s.size()];
-	outputs= new float*[s.size()];
-	inputs= new float*[s.size()];
-	register int i=0;
-	register int prev_size=1;
-	for(int layeSize:s) // TODO rename
-	{
-		transfer[i]= new ActivationFunction::Sigmoid(lam);
-		layeSize+=1;
-		if(i==0)
-		{
-			prev_size=layeSize;
-		}
-		layerSizes[i]=layeSize;
-		weights[i]= new float*[layeSize];
-		outputs[i]= new float[layeSize];
-		inputs[i]= new float[layeSize];
-
-		outputs[i][0]=1.0;
-		for (int j=1;j<layeSize;j++)
-		{
-			weights[i][j]= new float[prev_size];
-			for(int k=0;k<prev_size;k++)
-			{
-				weights[i][j][k]=weightInit(i,j,k);
-			}
-		}
-		i++;
-		prev_size=layeSize;
-	}
-}
-
-LayerNetwork::~LayerNetwork()
-{
-	if(weights != nullptr)
-	{
-		for(size_t i=0;i<layers;i++)
-		{
-			for (size_t j=1;j<layerSizes[i];j++)
-			{
-				delete[] weights[i][j];
-			}
-			delete[] weights[i];
-			delete[] outputs[i];
-			delete[] inputs[i];
-			delete transfer[i];
-		}
-		delete[] weights;
-		delete[] layerSizes;
-		delete[] outputs;
-		delete[] inputs;
-		delete[] transfer;
-	}
-	if(ffLayers !=nullptr)
-	{
-		for(size_t i=0;i<layers;i++)
-		{
-			delete ffLayers[i];
-		}
-		delete[] ffLayers;
-	}
-	delete basisFunction;
-}
-
-void LayerNetwork::solvePart(float *newSolution, register size_t begin, size_t end,size_t prevSize, float *sol,size_t layer)
-{
-	ActivationFunction::StreamingActivationFunction *function=dynamic_cast<ActivationFunction::StreamingActivationFunction*>(transfer[layer]);
-	BasisFunction::StreamingBasisFunction *bFunc=dynamic_cast<BasisFunction::StreamingBasisFunction*>(basisFunction);
-
-	size_t alignedPrev=prevSize>16?(prevSize-(prevSize%16)):0;
-	__m128 partialSolution;
-
-	if(prevSize >=4 && function !=nullptr && bFunc != nullptr)
-	{
-		for( size_t j=begin;j<end;j++)
-		{
-			partialSolution=bFunc->operator()(prevSize,weights[layer][j],sol,alignedPrev);
-			_mm_store_ss(inputs[layer]+j,partialSolution);
-			partialSolution=function->operator()(partialSolution);
-			_mm_store_ss(newSolution+j,partialSolution);
-		}
-	}else
-	{
-		for( size_t j=begin;j<end;j++)
-		{
-			if (bFunc !=nullptr && prevSize >=4)
-			{
-				partialSolution=bFunc->operator()(prevSize,weights[layer][j],sol,alignedPrev);
-				_mm_store_ss(inputs[layer]+j,partialSolution);
-				newSolution[j]=transfer[layer]->operator()(inputs[layer][j]);
-			}else
-			{
-				const float tmp=basisFunction->operator()(prevSize,weights[layer][j],sol);
-				inputs[layer][j]=tmp;
-				newSolution[j]=transfer[layer]->operator()(tmp);
-			}
-		}
-	}
-}
-
-std::vector<float> LayerNetwork::solve(const std::vector<float>& p)
-{
-	register float* sol=outputs[0];
-
-	if(p.size()+1 != layerSizes[0])
-	{
-		throw std::out_of_range("Wrong number of inputs");
-	}
-
-	sol[0]=1;
-	for(size_t i=0;i<p.size();i++)
-		sol[i+1]=p[i];
-
-	register size_t prevSize=layerSizes[0];
-	for(register size_t i=1;i<layers;i++)
-	{
-		float* newSolution= outputs[i];
-		if(threads > 1 && (layerSizes[i] > 700 || prevSize > 700)) // 700 is an guess about actual size, when creating thread has some speedup
-		{
-			std::vector<std::thread> th;
-			size_t s=1;
-			register size_t step =layerSizes[i]/threads;
-			for(size_t t=1;t<threads;t++)
-			{
-				th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{
-					solvePart(newSolution,from,to,prevSize,sol,i);
-				},s,s+step));
-				s+=step;
-			}
-			solvePart(newSolution,s,layerSizes[i],prevSize,sol,i);
-			for (auto& thr : th)
-				thr.join();
-		}else
-		{
-			solvePart(newSolution,1,layerSizes[i],prevSize,sol,i);
-		}
-		prevSize=layerSizes[i];
-		sol=newSolution;
-	}
-	std::vector<float> ret;
-	for(size_t i=1;i<prevSize;i++)
-	{
-		ret.push_back(sol[i]);
-	}
-	return ret;
-}
-
-LayerNetworkLayer& LayerNetwork::operator[](const size_t& l)
-{
-	if(ffLayers==nullptr)
-	{
-		ffLayers=new LayerNetworkLayer*[layers];
-		for(size_t i=0;i<layers;i++)
-		{
-			ffLayers[i]=new LayerNetworkLayer(layerSizes[i],weights[i],outputs[i],inputs[i],lambda,*transfer[i]);
-		}
-	}
-
-	if(l>=layers)
-		throw std::out_of_range("Not so many layers in network.");
-
-	return *ffLayers[l];
-}
src/NeuralNetwork/LayerNetwork.h (deleted)
@@ -1,140 +0,0 @@
-#ifndef _NN_LN_H_
-#define _NN_LN_H_
-
-#include "Network.h"
-
-#include "ActivationFunction/Sigmoid.h"
-
-#include "BasisFunction/FeedForward.h"
-
-#include <vector>
-#include <initializer_list>
-#include <thread>
-#include <pthread.h>
-
-#include <iostream>
-#include <math.h>
-
-#include <mmintrin.h>
-#include <xmmintrin.h>
-#include <emmintrin.h>
-#include <xmmintrin.h>
-#include <pmmintrin.h>
-
-#include "../sse_mathfun.h"
-
-namespace NeuralNetwork
-{
-	class LayerNetworkNeuron : public Neuron
-	{
-		public:
-			inline LayerNetworkNeuron(float *w, float &outputF, float &i,float lam,ActivationFunction::ActivationFunction &fun):function(fun),weights(w),out(outputF),inputs(i),lambda(lam) { }
-
-			LayerNetworkNeuron() = delete;
-			LayerNetworkNeuron(const LayerNetworkNeuron&) = delete;
-			LayerNetworkNeuron& operator=(const LayerNetworkNeuron&) = delete;
-
-			inline virtual float getWeight(const int& i ) const override { return weights[i+1]; }
-			inline virtual void setWeight(const int& i,const float &p) override { weights[i+1]=p; }
-
-			inline virtual float output() const override { return out; }
-			inline virtual float input() const override { return inputs; }
-			inline virtual float derivatedOutput() const override { return function.derivatedOutput(inputs,out); }
-
-			inline virtual float getBias() const override { return weights[0]; }
-			inline virtual void setBias(const float & bias) override { weights[0]=bias; }
-		protected:
-			ActivationFunction::ActivationFunction &function;
-			float *weights;
-			float &out;
-			float &inputs;
-			float lambda;
-		private:
-	};
-
-	class LayerNetworkLayer: public Layer
-	{
-		public:
-			inline LayerNetworkLayer(size_t s, float **w,float *out,float *in,float lam,ActivationFunction::ActivationFunction &fun): function(fun), layerSize(s),weights(w),outputs(out),inputs(in),lambda(lam) {}
-			~LayerNetworkLayer();
-
-			LayerNetworkLayer(const LayerNetworkLayer &) = delete;
-			LayerNetworkLayer& operator=(const LayerNetworkLayer &) = delete;
-
-			virtual LayerNetworkNeuron& operator[](const size_t& neuron) override;
-			inline virtual size_t size() const override {return layerSize-1;};
-		protected:
-			ActivationFunction::ActivationFunction &function;
-			LayerNetworkNeuron **neurons=nullptr;
-			size_t layerSize;
-			float **weights;
-			float *outputs;
-			float *inputs;
-			float lambda;
-	};
-
-	/**
-	 * @brief typedef for LayerNetwork network initializating function
-	 */
-	typedef std::function<float(const size_t&layer, const size_t &neuron, const size_t &weight)> LayerNetworkInitializer;
-
-	/**
-	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
-	 * @brief Class representing LayerNetwork network
-	 * @see ACyclicNetwork
-	 *
-	 * @b Usage:
-	 * @code
-	 *	Shin::NeuralNetwork::LayerNetwork net({1,5,2});
-	 *	net.setThreads(2); // it alows network to use 2 threads if it needs to.
-	 *	Shin::Solution sol = net.solve(Shin::Problem(0.1)) // and finaly, solve Problem
-	 * @endcode
-	 */
-	class LayerNetwork:public Network
-	{
-		public:
-			/**
-			 * @brief Constructor for LayerNetwork
-			 * @param s is initiaizer for layers (it's sizes)
-			 * @param lam is parametr for TransferFunction
-			 * @param weightInit is weight initializer function
-			 */
-			LayerNetwork(std::initializer_list<size_t> s, double lam=NeuralNetwork::lambda,
-				LayerNetworkInitializer weightInit=
-					[](const size_t&, const size_t &, const size_t &)->float{ return 1.0-((float)(rand()%2001))/1000.0;}
-			);
-			virtual ~LayerNetwork();
-
-			/**
-			 * @brief we don't want to allow network to be copied
-			 */
-			LayerNetwork(const LayerNetwork &f) = delete; //TODO
-			/**
-			 * @brief we don't want to allow network to be assigned
-			 */
-			LayerNetwork operator=(const LayerNetwork &f)=delete;
-
-			/**
-			 * @brief computes output Solution from input Problem
-			 */
-
-			virtual size_t size() const { return layers; };
-
-			virtual std::vector<float> solve(const std::vector<float>& input) override;
-
-			virtual LayerNetworkLayer& operator[](const size_t& l) override;
-		protected:
-			void solvePart(float *newSolution, size_t begin, size_t end,size_t prevSize, float* sol,size_t layer);
-		private:
-			LayerNetworkLayer **ffLayers=nullptr;
-			float ***weights=nullptr;
-			float **outputs=nullptr;
-			float **inputs=nullptr;
-			ActivationFunction::ActivationFunction **transfer=nullptr;
-			BasisFunction::BasisFunction *basisFunction = new BasisFunction::FeedForward();
-			size_t *layerSizes=nullptr;
-			size_t layers;/**< Number of layers */
-	};
-
-}
-#endif
@@ -1 +0,0 @@
-./BackPropagation.h
src/NeuralNetwork/Learning/BackPropagation.cpp (deleted)
@@ -1,109 +0,0 @@
-#include "./BackPropagation"
-
-NeuralNetwork::Learning::BackPropagation::~BackPropagation()
-{
-	if(deltas!=nullptr)
-	{
-		for(size_t i=0;i<network.size();i++)
-			delete[] deltas[i];
-	}
-	delete[] deltas;
-}
-
-void NeuralNetwork::Learning::BackPropagation::propagate(const std::vector<float>& expectation)
-{
-
-	if(deltas==nullptr)
-	{
-		deltas=new float*[network.size()];
-		for(size_t i=0;i<network.size();i++)
-		{
-			deltas[i]=new float[network[i].size()];
-			deltas[i][0]=0.0;
-		}
-	}
-
-	for(size_t j=1;j<network[network.size()-1].size();j++)
-	{
-		deltas[network.size()-1][j]= correction(expectation[j-1],network[network.size()-1][j].output())
-			*network[network.size()-1][j].derivatedOutput();
-	}
-
-	for(int i=(int)network.size()-2;i>0;i--)
-	{
-		if(allowThreads)
-		{
-			std::vector<std::thread> th;
-			size_t s=0;
-			//TODO THIS IS NOT WORKING!!!
-#define THREADS 4
-			int step =network[i].size()/THREADS;
-			for(int t=1;t<=THREADS;t++)
-			{
-				if(s>=network[i].size())
-					break;
-				th.push_back(std::thread([&i,this](size_t from, size_t to)->void{
-					for(size_t j=from;j<to;j++)
-					{
-						register float deltasWeight = 0;
-						for(size_t k=1;k<this->network[i+1].size();k++)
-						{
-							deltasWeight+=deltas[i+1][k]*this->network[i+1][k].getWeight(j);
-						}
-						//deltas[i][j]*=this->network[i]->operator[](j)->derivatedOutput(); // WHY THE HELL IS SEQ here??
-					}
-				},s,t==THREADS?network[i].size():s+step));//{}
-				s+=step;
-			}
-			for (auto& thr : th)
-				thr.join();
-		}else
-		{
-			for(size_t j=0;j<network[i].size();j++)
-			{
-				register float deltasWeight = 0;
-				for(size_t k=1;k<this->network[i+1].size();k++)
-				{
-					deltasWeight+=deltas[i+1][k]*this->network[i+1][k].getWeight(j);
-				}
-				deltas[i][j]=deltasWeight*this->network[i][j].derivatedOutput();
-			}
-		}
-	}
-
-	for(size_t i=1;i<network.size();i++)
-	{
-		size_t max=network[i-1].size();
-
-		for(size_t j=1;j<network[i].size();j++)
-		{
-			network[i][j].setWeight(0,network[i][j].getWeight(0)+deltas[i][j]*learningCoeficient);
-			for(size_t k=1;k<max;k++)
-			{
-				network[i][j].setWeight(k, network[i][j].getWeight(k)+learningCoeficient*deltas[i][j]*network[i-1][k].output());
-			}
-		}
-	}
-}
-
-
-float NeuralNetwork::Learning::BackPropagation::teach(const std::vector<float>& p, const std::vector<float>& solution)
-{
-	std::vector<float> a=network.solve(p);
-	double error=calculateError(solution,a);
-
-	std::vector<float> s;
-	if(noise)
-	{
-		for(size_t i=0;i<solution.size();i++)
-		{
-			s.push_back(solution[i]*((double)((100000-noiseSize)+(rand()%(noiseSize*2+1)))/100000.0));
-		}
-		propagate(s);
-	}else
-	{
-		propagate(solution);
-	}
-
-	return error;
-}
src/NeuralNetwork/Learning/BackPropagation.h (deleted)
@@ -1,47 +0,0 @@
-#ifndef _BACK_PROPAGATION_H_
-#define _BACK_PROPAGATION_H_
-
-#include <math.h>
-#include <thread>
-#include <cstddef>
-
-#include "../LayerNetwork.h"
-#include "Learning.h"
-
-/*
- * http://sydney.edu.au/engineering/it/~comp4302/ann4-3s.pdf
- * http://www.cs.cmu.edu/afs/cs/academic/class/15883-f13/slides/backprop.pdf
- * http://airccse.org/journal/jcsit/0211ijcsit08.pdf
- * http://www.cedar.buffalo.edu/~srihari/CSE574/Chap5/Chap5.3-BackProp.pdf
- * http://stackoverflow.com/questions/13095938/can-somebody-please-explain-the-backpropagation-algorithm-to-me
- * http://ufldl.stanford.edu/wiki/index.php/Backpropagation_Algorithm
- *
- * http://www.cleveralgorithms.com/nature-inspired/neural/backpropagation.html
- *
- */
-
-namespace NeuralNetwork
-{
-	namespace Learning
-	{
-		class BackPropagation : public Learning
-		{
-			public:
-				BackPropagation(LayerNetwork &n): Learning(), network(n) {}
-				virtual ~BackPropagation();
-
-				BackPropagation(const NeuralNetwork::Learning::BackPropagation&) =delete;
-				BackPropagation operator=(const NeuralNetwork::Learning::BackPropagation&) =delete;
-
-				float teach(const std::vector<float>&p,const std::vector<float>&solution);
-				virtual void propagate(const std::vector<float>& expectation);
-
-			protected:
-				LayerNetwork &network;
-				inline virtual float correction(const float& expected, const float& computed) { return expected - computed;};
-
-				float **deltas=nullptr;
-		};
-	}
-}
-#endif
src/NeuralNetwork/Learning/Learning.cpp (deleted)
@@ -1,21 +0,0 @@
-#include "Learning.h"
-
-float NeuralNetwork::Learning::Learning::calculateError(const std::vector<float>& expectation, const std::vector<float>& solution)
-{
-	register float a=0;
-	for (size_t i=0;i<expectation.size();i++)
-	{
-		a+=pow(expectation[i]-solution[i],2)/2;
-	}
-	return a;
-}
-
-float NeuralNetwork::Learning::Learning::teachSet(const std::vector<std::pair<std::vector<float>,std::vector<float>>> &set)
-{
-	double error=0;
-	for (register size_t i=0;i<set.size();i++)
-	{
-		error+=teach(set[i].first,set[i].second);
-	}
-	return error;
-}
src/NeuralNetwork/Learning/Learning.h (deleted)
@@ -1,40 +0,0 @@
-#ifndef _S_NN_LEARNING_H_
-#define _S_NN_LEARNING_H_
-
-#include <cstddef>
-
-#include "../FeedForward.h"
-
-namespace NeuralNetwork
-{
-	namespace Learning
-	{
-		const float LearningCoeficient=0.4;
-		class Learning
-		{
-			public:
-				Learning() {};
-				inline virtual ~Learning() {};
-
-				inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; };
-
-				inline virtual void allowThreading() final {allowThreads=1;}
-				inline virtual void disableThreading() final {allowThreads=0;}
-
-				inline virtual void allowNoise() final {noise=1;}
-				inline virtual void disableNoise() final {noise=0;}
-				inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; }
-
-				float calculateError(const std::vector<float> &expectation,const std::vector<float> &solution);
-				virtual float teach(const std::vector<float> &p,const std::vector<float> &solution)=0;
-				virtual float teachSet(const std::vector<std::pair<std::vector<float>,std::vector<float>>> &set) final;
-
-			protected:
-				float learningCoeficient=LearningCoeficient;
-				bool allowThreads=0;
-				bool noise=0;
-				unsigned noiseSize=500;
-		};
-	}
-}
-#endif
@@ -1 +0,0 @@
-./OpticalBackPropagation.h
src/NeuralNetwork/Learning/OpticalBackPropagation.h (deleted)
@@ -1,28 +0,0 @@
-#ifndef _OPT_BACK_PROPAGATION_H_
-#define _OPT_BACK_PROPAGATION_H_
-
-#include "BackPropagation.h"
-
-/*
- * http://proceedings.informingscience.org/InSITE2005/P106Otai.pdf
- */
-
-namespace NeuralNetwork
-{
-	namespace Learning
-	{
-		class OpticalBackPropagation : public BackPropagation
-		{
-			public:
-				inline OpticalBackPropagation(LayerNetwork &n): BackPropagation(n) {}
-			protected:
-				virtual float correction(const float& expected, const float& computed) override
-				{
-					register float tmp=(expected-computed);
-					register float ret=1+exp(tmp*tmp);
-					return tmp < 0? -ret:ret;
-				};
-		};
-	}
-}
-#endif
src/NeuralNetwork/Makefile (deleted)
@@ -1,28 +0,0 @@
-OBJFILES=\
-	LayerNetwork.o\
-	Learning/Learning.o Learning/BackPropagation.o ../sse_mathfun.o
-
-LINKFILES=
-
-LIBNAME=NeuralNetwork
-
-include ../../Makefile.const
-
-all: lib
-
-../sse_mathfun.o: ../sse_mathfun.cpp ../sse_mathfun.h
-	make -C ../
-
-lib: $(LIBNAME).so $(LIBNAME).a
-
-$(LIBNAME).so: $(OBJFILES)
-	$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so
-
-$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ./ActivationFunction/ActivationFunction.h ./ActivationFunction/Sigmoid.h
-	rm -f $(LIBNAME).a # create new library
-	ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
-	ranlib $(LIBNAME).a
-	nm --demangle $(LIBNAME).a > $(LIBNAME).nm
-
-clean:
-	@rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o
src/NeuralNetwork/Network.h (deleted)
@@ -1,93 +0,0 @@
-#ifndef _S_NN_NN_H_
-#define _S_NN_NN_H_
-
-#include <cstddef>
-#include <vector>
-
-#include "Neuron.h"
-
-namespace NeuralNetwork
-{
-	/**
-	 * @brief Default value for lambda
-	 */
-	const float lambda=0.8;
-
-	/**
-	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
-	 * @brief Abstract class for all Layers of neurons
-	 */
-	class Layer
-	{
-		public:
-
-			virtual ~Layer() {};
-
-			/**
-			 * @brief This is a virtual function for selecting neuron
-			 * @param neuron is position in layer
-			 * @returns Specific neuron
-			 */
-
-			virtual Neuron& operator[](const size_t& neuron)=0;
-			/**
-			 * @returns Size of layer
-			 */
-			virtual size_t size() const=0;
-	};
-
-	/**
-	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
-	 * @brief Abstract model of simple Network
-	 */
-	class Network
-	{
-		public:
-			/**
-			 * @brief Constructor for Network
-			 * @param lam is parametr for many TransferFunctions
-			 */
-			inline Network() {};
-
-			/**
-			 * @brief Virtual destructor for Network
-			 */
-			virtual ~Network() {};
-
-			/**
-			 * @brief This is a virtual function for all networks
-			 * @param p is a Problem to be solved
-			 * @returns Solution of Network for Problem
-			 */
-			virtual std::vector<float> solve(const std::vector<float>& input)=0;
-
-			/**
-			 * @brief Getter of layer
-			 * @param layer is position fo layer
-			 * @returns Retruns specified layer
-			 */
-			virtual Layer& operator[](const size_t &layer)=0;
-
-			/**
-			 * @brief Returns parametr for TransferFunctions
-			 * @returns lambda (parametr for TransferFunctions)
-			 */
-			inline float getLambda() const {return lambda;}
-
-			/**
-			 * @param t is number of threads, if set to 0 or 1 then threading is disabled
-			 * @brief Enables or disables Threaded computing of ANN
-			 */
-
-			inline virtual void setThreads(const unsigned&t) final {threads=t;}
-
-		protected:
-
-			/**
-			 * @brief Number of threads used by network
-			 */
-			unsigned threads=1;
-	};
-
-}
-#endif
@@ -1,33 +0,0 @@
#ifndef _S_NN_PERCEP_H_
#define _S_NN_PERCEP_H_

#include "./FeedForward"
#include "TransferFunction/Heaviside.h"

namespace NeuralNetwork
{
	/**
	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
	 * @brief Class representing a Perceptron - a network with only two layers (input and output) and a Heaviside transfer function
	 */
	class Perceptron:public FeedForward
	{
		public:
			/**
			 * @brief Constructor for Perceptron network
			 * @param inputSize size of input Problem
			 * @param outputSize size of output Solution
			 */
			Perceptron(const size_t &inputSize, const size_t &outputSize):FeedForward({inputSize,outputSize})
			{
				// iterate through layers and set them to the Heaviside function
				for(int i=0;i<layers;i++)
				{
					delete transfer[i];
					transfer[i]= new TransferFunction::Heaviside(0.5);
				}
			};
	};

}
#endif
50
src/NeuralNetwork/Recurrent/Network.cpp
Normal file
@@ -0,0 +1,50 @@
#include <NeuralNetwork/Recurrent/Network.h>

std::vector<float> NeuralNetwork::Recurrent::Network::computeOutput(const std::vector<float>& input, unsigned int iterations) {
	//TODO: check inputSize
	size_t neuronSize=neurons.size();

	std::vector<float> outputs(neuronSize);
	for(size_t i=0;i<inputSize;i++) {
		outputs[i]=input[i];
	}

	for(unsigned int iter=0;iter<iterations;iter++) {

		for(size_t i=inputSize;i<neuronSize;i++) {
			outputs[i]=neurons[i].output();
		}

		// update neurons
		for(size_t i=inputSize;i<neuronSize;i++) {
			neurons[i](outputs);
		}
	}

	std::vector<float> ret;
	for(size_t i=0;i<outputSize;i++) {
		ret.push_back(neurons[i+inputSize].output());
	}

	return ret;
}

void NeuralNetwork::Recurrent::Network::stringify(std::ostream& out) const {
	out << std::setprecision(std::numeric_limits<float>::digits10+1);
	out << "{\n";
	out << "\t\"class\":\"NeuralNetwork::Recurrent::Network\",\n";
	out << "\t\"size\":" << neurons.size() << ",\n";
	out << "\t\"inputs\":" << inputSize << ",\n";
	out << "\t\"outputs\":" << outputSize << ",\n";

	out << "\t\"neurons\":[";

	for(size_t i=0;i<neurons.size();i++) {
		if(i!=0)
			out << ",\n";

		out << neurons[i].stringify("\t\t");
	}
	out << "\n\t]\n";

	out <<"}";
}
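For orientation, computeOutput above treats the neuron vector as [inputs | outputs | hidden]: the input is copied into the first inputSize slots, each iteration first snapshots every non-input neuron's output and only then updates all neurons against that snapshot (a synchronous update, so update order does not matter), and the result is read from the outputSize neurons stored directly after the inputs. A minimal driving loop, assuming the (inputs, outputs, hidden) constructor order implied by tests/recurrent.cpp below:

#include <NeuralNetwork/Recurrent/Network.h>

#include <iostream>

int main() {
	// 2 inputs, 1 output, 1 hidden neuron: neurons[0..1] inputs,
	// neurons[2] the output, neurons[3] the hidden one (layout assumed)
	NeuralNetwork::Recurrent::Network net(2, 1, 1);
	net.getNeurons()[2].setWeight(net.getNeurons()[3], 0.1); // output fed by hidden
	net.getNeurons()[3].setWeight(net.getNeurons()[2], 0.7); // hidden fed by output: a feedback loop

	// neuron outputs persist between calls, so repeated calls with the same
	// input keep evolving through the feedback loop (as the test below shows)
	for (int i = 0; i < 5; i++) {
		std::cout << net.computeOutput({1.0f, 0.7f})[0] << "\n";
	}
}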
25
src/NeuralNetwork/Recurrent/Neuron.cpp
Normal file
@@ -0,0 +1,25 @@
#include <NeuralNetwork/Recurrent/Neuron.h>

std::string NeuralNetwork::Recurrent::Neuron::stringify(const std::string &prefix) const {
	std::ostringstream out;
	out.precision(std::numeric_limits<float>::digits10+1);
	out << std::setprecision(std::numeric_limits<float>::digits10+1);

	out << prefix << "{\n";
	out << prefix << "\t\"class\": \"NeuralNetwork::Recurrent::Neuron\",\n";
	out << prefix << "\t\"id\": " << id() << ",\n";
	out << prefix << "\t\"bias\": " << getBias() << ",\n";
	out << prefix << "\t\"output\": " << output() << ",\n";
	out << prefix << "\t\"value\": " << value() << ",\n";
	out << prefix << "\t\"activationFunction\": " << activation->stringify() <<",\n";
	out << prefix << "\t\"basisFunction\": " << basis->stringify() <<",\n";
	out << prefix << "\t\"weights\": [";
	for(size_t j=0;j<weights.size();j++) {
		if(j!=0)
			out << ", ";
		out << weights[j];
	}
	out << "]\n";
	out << prefix << "}";
	return out.str();
}
11091
src/sse_mathfun.as
Normal file
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
#include "./sse_mathfun.h"
#include "../include/sse_mathfun.h"

#include <xmmintrin.h>

@@ -174,12 +174,10 @@ v4sf exp_ps(v4sf x) {
#endif
	v4sf one = *(v4sf*)_ps_1;

	x = _mm_min_ps(x, *(v4sf*)_ps_exp_hi);
	x = _mm_max_ps(x, *(v4sf*)_ps_exp_lo);
	x = _mm_max_ps( _mm_min_ps(x, *(v4sf*)_ps_exp_hi), *(v4sf*)_ps_exp_lo);

	/* express exp(x) as exp(g + n*log(2)) */
	fx = _mm_mul_ps(x, *(v4sf*)_ps_cephes_LOG2EF);
	fx = _mm_add_ps(fx, *(v4sf*)_ps_0p5);
	fx = _mm_add_ps(_mm_mul_ps(x, *(v4sf*)_ps_cephes_LOG2EF), *(v4sf*)_ps_0p5);

	/* how to perform a floorf with SSE: just below */
#ifndef USE_SSE2
@@ -195,36 +193,25 @@ v4sf exp_ps(v4sf x) {
#endif
	/* if greater, subtract 1 */
	v4sf mask = _mm_cmpgt_ps(tmp, fx);
	mask = _mm_and_ps(mask, one);
	fx = _mm_sub_ps(tmp, mask);
	fx = _mm_sub_ps(tmp, _mm_and_ps(mask, one));

	tmp = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1);
	v4sf z = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2);
	x = _mm_sub_ps(x, tmp);
	x = _mm_sub_ps(x, z);
	x = _mm_sub_ps(x, _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1));
	x = _mm_sub_ps(x, _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2));

	z = _mm_mul_ps(x,x);

	v4sf y = *(v4sf*)_ps_cephes_exp_p0;
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p1);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p2);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p3);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p4);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p5);
	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p1);
	y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p2);
	y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p3);
	y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p4);
	y = _mm_add_ps(_mm_mul_ps(y, x), *(v4sf*)_ps_cephes_exp_p5);
	y = _mm_mul_ps(y, _mm_mul_ps(x,x));
	y = _mm_add_ps(y, x);
	y = _mm_add_ps(y, one);

	/* build 2^n */
#ifndef USE_SSE2
	z = _mm_movehl_ps(z, fx);
	mm0 = _mm_cvttps_pi32(fx);
	mm1 = _mm_cvttps_pi32(z);
	mm1 = _mm_cvttps_pi32(_mm_movehl_ps( _mm_mul_ps(x,x), fx));
	mm0 = _mm_add_pi32(mm0, *(v2si*)_pi32_0x7f);
	mm1 = _mm_add_pi32(mm1, *(v2si*)_pi32_0x7f);
	mm0 = _mm_slli_pi32(mm0, 23);
@@ -234,10 +221,8 @@ v4sf exp_ps(v4sf x) {
	COPY_MM_TO_XMM(mm0, mm1, pow2n);
	_mm_empty();
#else
	emm0 = _mm_cvttps_epi32(fx);
	emm0 = _mm_add_epi32(emm0, *(v4si*)_pi32_0x7f);
	emm0 = _mm_slli_epi32(emm0, 23);
	v4sf pow2n = _mm_castsi128_ps(emm0);
	emm0 = _mm_add_epi32(_mm_cvttps_epi32(fx), *(v4si*)_pi32_0x7f);
	v4sf pow2n = _mm_castsi128_ps(_mm_slli_epi32(emm0, 23));
#endif
	y = _mm_mul_ps(y, pow2n);
	return y;

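The exp_ps edits above only fold separate multiply/add statements into nested calls; the sequence of operations, and hence the result, appears unchanged. The routine itself is the classic Cephes scheme: clamp x, split it as x = n*ln(2) + g with n = floor(x*log2(e) + 0.5), evaluate a degree-5 polynomial for e^g, and rebuild 2^n by writing n + 127 into the exponent bits of a float. A scalar sketch of the same scheme, not part of the library (the coefficients are meant to match the Cephes constants the SSE code loads from _ps_cephes_exp_*; treat them as illustrative here):

#include <cmath>
#include <cstdint>
#include <cstring>
#include <iostream>

float exp_sketch(float x) {
	// clamp, as exp_ps does with _ps_exp_hi/_ps_exp_lo
	x = std::fmin(std::fmax(x, -88.3762626647949f), 88.3762626647949f);
	// split x = n*ln(2) + g, n = floor(x*log2(e) + 0.5)
	float n = std::floor(x * 1.44269504f + 0.5f);
	// subtract n*ln(2) in two parts for extra precision (C1 + C2)
	float g = x - n * 0.693359375f + n * 2.12194440e-4f;
	// degree-5 polynomial approximation of exp(g) on |g| <= ln(2)/2
	float y = 1.9875691500e-4f;
	y = y * g + 1.3981999507e-3f;
	y = y * g + 8.3334519073e-3f;
	y = y * g + 4.1665795894e-2f;
	y = y * g + 1.6666665459e-1f;
	y = y * g + 5.0000001201e-1f;
	y = y * g * g + g + 1.0f;
	// build 2^n by placing (n + 127) into the float exponent field
	uint32_t bits = (uint32_t)((int32_t)n + 127) << 23;
	float pow2n;
	std::memcpy(&pow2n, &bits, sizeof pow2n);
	return y * pow2n;
}

int main() {
	std::cout << exp_sketch(1.0f) << " vs " << std::exp(1.0f) << "\n"; // both ~2.71828
}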
@@ -1,45 +1,27 @@
include ../Makefile.const

OPTIMALIZATION=
LIB_DIR = ../lib
#GEN_TESTS=g-01 g-02

NN_TESTEABLE=\
	nn-01 nn-02 nn-03 nn-bp-sppeed \
	nn-bp-xor \
	nn-obp-xor \
	nn-rl-xor nn-rl-and nn-rl-xor2\
	nn-reinforcement nn-04 \
	nn-pong
ALL_TESTS=activation basis recurrent

NN_TESTS= $(NN_TESTEABLE) nn-pong

ALL_TESTS=$(NN_TESTEABLE) $(GEN_TESTS)

LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a
#LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a
#LIBS=-lGenetics.so -lNeuronNetwork

CXXFLAGS += -I$(LIB_DIR)

all:| lib $(ALL_TESTS);

gen: $(GEN_TESTS)

all:$(ALL_TESTS);

test: all
	@for i in $(ALL_TESTS); do echo -n ./$$i; echo -n " - "; ./$$i; echo ""; done

g-%: g-%.cpp $(LIB_DIR)/Genetics.a
	$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -o $@ $< $ $(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a -lm
../src/NeuralNetwork.so: lib

nn-%: nn-%.cpp $(LIB_DIR)/NeuralNetwork.a
	$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuralNetwork.a -lm

nn-pong: ./nn-pong.cpp $(LIB_DIR)/NeuronNetwork.a
	$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuronNetwork.a -lm -lalleg -lGL
%: %.cpp ../src/NeuralNetwork.so | lib %.cpp ../src/NeuralNetwork.so
	$(CXX) $(CXXFLAGS) -I../include -o $@ $< $ -lm ../src/NeuralNetwork.so -msse4.2 -DHAVE_VECLIB

lib:
	make -C ../
	@make -C ../

clean:
	@for i in $(ALL_TESTS);do rm -f $$i;done;
	@for i in $(ALL_TESTS);do rm -f $$i;done;

59
tests/activation.cpp
Normal file
@@ -0,0 +1,59 @@
#include <NeuralNetwork/ActivationFunction/Heaviside.h>
#include <NeuralNetwork/ActivationFunction/Sigmoid.h>
#include <NeuralNetwork/ActivationFunction/HyperbolicTangent.h>

#include <iostream>
#include <cassert>
#include <chrono>

union {
	__m128 v;   // SSE 4 x float vector
	float a[4]; // scalar array of 4 floats
} U;

int main() {
	{
		NeuralNetwork::ActivationFunction::Heaviside h(1.0);
		assert(h(0.2) == 0);
		assert(h(1.2) == 1);
	}

	{
		NeuralNetwork::ActivationFunction::Heaviside h(0.7);
		assert(h(0.2) == 0);
		assert(h(0.8) == 1);
	}

	{
		NeuralNetwork::ActivationFunction::Sigmoid s(0.7);
		assert(s(0.1) > 0.517483);
		assert(s(0.1) < 0.51750);

		assert(s(10) > 0.998989);
		assert(s(10) < 0.999189);
	}
	{
		NeuralNetwork::ActivationFunction::Sigmoid s(5);
		assert(s(0.1) > 0.622359);
		assert(s(0.1) < 0.622559);

		assert(s(0.7) > 0.970588);
		assert(s(0.7) < 0.970788);
	}
	{
		NeuralNetwork::ActivationFunction::Sigmoid s(0.7);
		U.a[0]=0.1;
		U.a[1]=10;
		U.v=s(U.v);

		assert(U.a[0] > 0.517483);
		assert(U.a[0] < 0.51750);

		assert(U.a[1] > 0.998989);
		assert(U.a[1] < 0.999189);
	}

	std::cout << "OK" << std::endl;

	return 0;
}
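The asserted ranges above are consistent with Sigmoid(lambda) being the logistic function s(x) = 1/(1 + e^(-lambda*x)): for lambda = 0.7 and x = 0.1, 1/(1 + e^(-0.07)) is about 0.517492, inside (0.517483, 0.51750). A quick scalar check (the formula is an assumption about the library, verified only against these asserts):

#include <cmath>
#include <iostream>

// assumed form of NeuralNetwork::ActivationFunction::Sigmoid
float sigmoid(float lambda, float x) {
	return 1.0f / (1.0f + std::exp(-lambda * x));
}

int main() {
	std::cout << sigmoid(0.7f, 0.1f)  << "\n"; // ~0.517492, inside (0.517483, 0.51750)
	std::cout << sigmoid(0.7f, 10.0f) << "\n"; // ~0.999089, inside (0.998989, 0.999189)
	std::cout << sigmoid(5.0f, 0.1f)  << "\n"; // ~0.622459, inside (0.622359, 0.622559)
	std::cout << sigmoid(5.0f, 0.7f)  << "\n"; // ~0.970688, inside (0.970588, 0.970788)
}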
66
tests/basis.cpp
Normal file
@@ -0,0 +1,66 @@
#include <NeuralNetwork/BasisFunction/Linear.h>

#include <iostream>
#include <cassert>
#include <chrono>

int main() {
	{
		NeuralNetwork::BasisFunction::Linear l;
		assert(39.0==l.compute({1,2,3,5},{1,2,3,5}));
		assert(39.0==l.computeStreaming({1,2,3,5},{1,2,3,5}));
	}
	{
		NeuralNetwork::BasisFunction::Linear l;
		assert(88.0==l.computeStreaming({1,2,3,5,7},{1,2,3,5,7}));
		assert(88.0==l.compute({1,2,3,5,7},{1,2,3,5,7}));
	}
	{
		NeuralNetwork::BasisFunction::Linear l;
		std::vector<float> w;
		for(int in=0;in<100;in++) {
			w.push_back(2);
		}
		assert(400.0==l.computeStreaming(w,w));
		assert(400.0==l.compute(w,w));
	}
	{
		NeuralNetwork::BasisFunction::Linear l;
		std::vector<float> w;
		for(int in=0;in<55;in++) {
			w.push_back(2);
		}
		assert(220.0==l.computeStreaming(w,w));
		assert(220.0==l.compute(w,w));
	}
	/*
	std::vector<float> w;
	std::vector<float> i;
	for(int in=0;in<100000;in++) {
		w.push_back(2);
		i.push_back(2);
	}

	NeuralNetwork::BasisFunction::Linear l;
	{
		auto start = std::chrono::high_resolution_clock::now();
		for(int in=0;in<1000;in++) {
			l.compute(w,i);
		}
		auto end = std::chrono::high_resolution_clock::now();
		std::chrono::duration<double> diff = end-start;
		std::cout << "C++ :" << diff.count() << " s\n";
	}
	{
		auto start = std::chrono::high_resolution_clock::now();
		for(int in=0;in<1000;in++) {
			l.computeStreaming(w,i);
		}
		auto end = std::chrono::high_resolution_clock::now();
		std::chrono::duration<double> diff = end-start;
		std::cout << "SSE :" << diff.count() << " s\n";
	}
	*/
	std::cout <<"OK" << std::endl;

}
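The values asserted above follow from the linear basis being a plain dot product of weights and inputs: 1*1 + 2*2 + 3*3 + 5*5 = 39, adding 7*7 gives 88, and 100 (or 55) pairs of 2*2 give 400 (or 220). A scalar reference for comparison; computeStreaming is presumably the SSE path benchmarked in the commented-out block:

#include <cassert>
#include <vector>

// reference dot product, the assumed semantics of BasisFunction::Linear
float dot(const std::vector<float>& w, const std::vector<float>& in) {
	float sum = 0;
	for (size_t i = 0; i < w.size(); i++)
		sum += w[i] * in[i]; // weight_i * input_i
	return sum;
}

int main() {
	assert(dot({1,2,3,5}, {1,2,3,5}) == 39.0f);
	assert(dot({1,2,3,5,7}, {1,2,3,5,7}) == 88.0f);
	assert(dot(std::vector<float>(100, 2), std::vector<float>(100, 2)) == 400.0f);
}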
@@ -1,47 +0,0 @@
#include "../src/NeuralNetwork/FeedForward.h"
#include "../src/NeuralNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

//typedef Shin::NeuronNetwork::Problem X;

class X: public Shin::Problem
{
	public:
		X(const X& a) :Problem(a) {}
		X(const std::vector<bool> &a):Problem() { for (bool s:a) data.push_back((float)s);}
	protected:
};

int main(int argc,char**)
{
	srand(time(NULL));
	std::vector<Shin::Solution> s;
	std::vector<X> p;

	//
	s.push_back(Shin::Solution(std::vector<float>({1})));
	p.push_back(X(std::vector<bool>({0})));

	s.push_back(Shin::Solution(std::vector<float>({0})));
	p.push_back(X(std::vector<bool>({1})));

	NeuralNetwork::FeedForward q({1,5000,5000,15000,2});
	if(argc > 1)
	{
		std::cerr << "THREADING\n";
		q.setThreads(2);
	}
#include <chrono>
	auto t1 = std::chrono::high_resolution_clock::now();
	for(int i=0;i<1000;i++)
	{
		//b.teach(p[i%2],s[i%2]);
		q.solve(p[i%2])[0];
		//std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
	}
	auto t2 = std::chrono::high_resolution_clock::now();
	std::cout << "Time: " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;

}
@@ -1,95 +0,0 @@

#include "../src/NeuralNetwork/FeedForward"

#include <iostream>

class X: public Shin::Problem
{
	protected:
		std::vector<float> representation() const
		{
			return std::vector<float>({1,1});
		}
};

class X1: public Shin::Problem
{
	protected:
		std::vector<float> representation() const
		{
			return std::vector<float>({1});
		}
};

int main()
{
	NeuralNetwork::FeedForward n({2,4,2});
	NeuralNetwork::FeedForward nq({2,4,2});
	if(n[2].size() != 4)
	{
		std::cout << "1) Actual size:" << n[1].size();
		return 1;
	}

	if(nq[1].size() != 4)
	{
		std::cout << "QUICK Actual size:" << nq[1].size();
		return 1;
	}

	n[2][0].setPotential(25);
	nq[2][0].setPotential(25);

	std::cout << "Potential: " << n[2][0].getPotential() << "\n";
	std::cout << "Potential: " << nq[2][0].getPotential() << "\n";

	Shin::Solution s =n.solve(X());
	Shin::Solution sq =nq.solve(X());

	if(s.size()!=2)
	{
		std::cout << "1";
		return 1;
	}

	for(int i=0;i<2;i++)
	{
		if(s[i]!=sq[i])
		{
			std::cout << " 4 - " << i << " expected "<<s[i] << " was " <<sq[i];
			return 1;
		}
	}
	n[2][0].setWeight(0,26.0);
	nq[2][0].setWeight(0,26.0);

	s =n.solve(X());
	sq =n.solve(X());

	if(s.size()!=2)
	{
		std::cout << "a1";
		return 1;
	}
	if(s[0]!=1)
	{
		std::cout << "a2";
		return 1;
	}
	if(s[1]!=1)
	{
		std::cout << "a3";
		return 1;
	}

	for(int i=0;i<2;i++)
	{
		if(s[i]!=sq[i])
		{
			std::cout << " a4 - " << i << " expected "<<s[i] << " was " <<sq[i];
			return 1;
		}
	}

	return 0;
}
@@ -1,58 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

class X: public Shin::Problem
{
	public:
		X(const X& a) :Problem(),q(a.q) {}
		X(const std::vector<float> &a):q(a) {}
		std::vector<float> representation() const
		{
			return q;
		}
	protected:
		std::vector<float> q;
};

int main()
{
	std::vector<Shin::Solution> s;
	std::vector<X> p;

	//
	s.push_back(Shin::Solution(std::vector<float>({0})));
	p.push_back(X(std::vector<float>({1,0})));
	s.push_back(Shin::Solution(std::vector<float>({0})));
	p.push_back(X(std::vector<float>({0,1})));
	s.push_back(Shin::Solution(std::vector<float>({0})));
	p.push_back(X(std::vector<float>({0,0})));
	s.push_back(Shin::Solution(std::vector<float>({1})));
	p.push_back(X(std::vector<float>({1,1})));

	Shin::NeuralNetwork::FeedForward q({2,4,1});
	Shin::NeuralNetwork::Learning::BackPropagation b(q);
	b.setLearningCoeficient(10);

	for(int i=0;i<4;i++)
	{
		b.teach(p[i%4],s[i%4]);
		std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[1] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
			s[i%4][0]<<"\n";
	}

	for(int i=0;i<40000;i++)
	{
		b.teach(p[i%4],s[i%4]);
	}
	std::cerr << "LEARNED\n";
	for(int i=0;i<4;i++)
	{
		b.teach(p[i%4],s[i%4]);
		std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[1] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
			s[i%4][0]<<"\n";
	}

}
@@ -1,69 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"

#include <iostream>
class X: public Shin::Problem
{
	public: X(bool x,bool y):Problem() {data.push_back(x);data.push_back(y);}
};

int main()
{
	srand(time(NULL));
	int lm=5;
	Shin::NeuralNetwork::FeedForward net({2,lm,1});
	bool x=1;
	int prev_err=0;
	int err=0;
	int l;
	int n;
	int w;
	int pot;
	int wei;
	int c=0;
	std::cout << "\ntest 1 & 1 - " << net.solve(X(1,1))[0];
	std::cout << "\ntest 1 & 0 - " << net.solve(X(1,0))[0];
	std::cout << "\ntest 0 & 1 - " << net.solve(X(0,1))[0];
	std::cout << "\ntest 0 & 0 - " << net.solve(X(0,0))[0];
	std::cout << "\n---------------------------------------";
	do{
		if(c%10000 ==1)
		{
			std::cout << "\nmixed";
			srand(time(NULL));
		}
		err=0;
		c++;
		l=rand()%2+1;
		n=rand()%lm;
		w=rand()%2;
		if(l==2)
			n=0;
		pot=net[l][n].getPotential();
		net[l][n].setPotential(pot*(rand()%21+90)/100);
		wei=net[l][n].getWeight(w);
		net[l][n].setWeight(w,wei*(rand()%21+90)/100);

		for(int i=0;i<100;i++)
		{
			bool x= rand()%2;
			bool y=rand()%2;
			Shin::Solution s =net.solve(X(x,y));
			if(s[0]!= (x xor y))
				err++;
		}

		if(err > prev_err)
		{
			net[l][n].setPotential(pot);
			net[l][n].setWeight(w,wei);
		};
		prev_err=err;
		if(err <1)
			x=0;
	}while(x);
	std::cout << "\ntest 1 & 1 - " << net.solve(X(1,1))[0];
	std::cout << "\ntest 1 & 0 - " << net.solve(X(1,0))[0];
	std::cout << "\ntest 0 & 1 - " << net.solve(X(0,1))[0];
	std::cout << "\ntest 0 & 0 - " << net.solve(X(0,0))[0];
	std::cout << "\nTotally: " << c << "\n";
}
@@ -1,71 +0,0 @@

#include "../src/NeuralNetwork/FeedForward"

#include <iostream>
#include <stdlib.h>


int main()
{

	srand(time(NULL));
	NeuralNetwork::FeedForward ns({1,1});
	ns[1][0].setWeight(-1,0);
	ns[1][0].setWeight(0,1);

	Shin::Solution ss =ns.solve(Shin::Problem({1}));

	if(ss[0] < 0.689874481 || ss[0] > 0.69)
	{
		std::cout << "1) Wrong counter: should be 0.626961, is: " << ss[0];
		return 1;
	}

	NeuralNetwork::FeedForward xorF({2,2,1},0.8);

	xorF[1][0].setWeight(-1,-6.06);
	xorF[1][0].setWeight(0,-11.62);
	xorF[1][0].setWeight(1,10.99);

	xorF[1][1].setWeight(-1,-7.19);
	xorF[1][1].setWeight(0,12.88);
	xorF[1][1].setWeight(1,-13-13);

	xorF[2][0].setWeight(-1,-6.56);
	xorF[2][0].setWeight(0,13.34);
	xorF[2][0].setWeight(1,-7.19);

	ss= xorF.solve(Shin::Problem({0,1}));

	if(ss[0] > 1 || ss[0] < 0.98 )
	{
		std::cout << "2) wrong output "<< ss[0] << "\n";
		return 1;
	}

	ss= xorF.solve(Shin::Problem({0,1}));

	if(ss[0] > 1 || ss[0] < 0.98 )
	{
		std::cout << "3) wrong output "<< ss[0];
		return 1;
	}

	ss= xorF.solve(Shin::Problem({0,0}));

	if(ss[0] <0 || ss[0] > 0.06 )
	{
		std::cout << "4) wrong output "<< ss[0] ;
		return 1;
	}

	ss= xorF.solve(Shin::Problem({1,1}));

	if(ss[0] <0 || ss[0] > 0.06 )
	{
		std::cout << "5) wrong output "<< ss[0];
		return 1;
	}

	return 0;
}
@@ -1,40 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

class X: public Shin::Problem
{
	public:
		X(const X& a) :Problem(a.data) {}
		X(const std::vector<float> &a):Problem(a) {}
};

int main(int argc, char**)
{
	srand(time(NULL));
	std::vector<Shin::Solution> s;
	std::vector<X> p;

	//
	s.push_back(Shin::Solution(std::vector<float>({1})));
	p.push_back(X(std::vector<float>({0})));

	s.push_back(Shin::Solution(std::vector<float>({0})));
	p.push_back(X(std::vector<float>({1})));

	Shin::NeuralNetwork::FeedForward q({1,5000,5000,5000,1});
	Shin::NeuralNetwork::Learning::BackPropagation b(q);

	if(argc >1)
	{
		std::cerr << "Allowing threading\n";
		b.allowThreading();
	}
	for(int i=0;i<2;i++)
	{
		b.teach(p[i%2],s[i%2]);
		std::cerr << i%2 <<". FOR: [" << p[i%2][0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
	}
}
@@ -1,61 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

class X: public Shin::Problem
{
	public:
		X(const X& a) :Problem(a) {}
		X(const std::vector<float> &a):Problem() {data=a;}
};

int main()
{
	srand(time(NULL));

	for (int test=0;test<2;test++)
	{
		Shin::NeuralNetwork::FeedForward q({2,3,1});
		Shin::NeuralNetwork::Learning::BackPropagation b(q);

		std::vector<std::pair<Shin::Problem, Shin::Solution> > set;
		set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,0}),Shin::Solution({0})));
		set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,0}),Shin::Solution({1})));
		set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,1}),Shin::Solution({0})));
		set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,1}),Shin::Solution({1})));
		if(test)
		{
			std::cerr << "Testing with entropy\n";
			b.allowNoise();
		}else
		{
			std::cerr << "Testing without entropy\n";
		}
		b.setLearningCoeficient(20);//8);
		for(int j=0;;j++)
		{
			double err=b.teachSet(set);
			if(err <0.3)
			{
				// b.setLearningCoeficient(5);
			}
			if(err <0.1)
			{
				// b.setLearningCoeficient(0.2);
			}
			if(err <0.001)
			{
				std::cerr << j << "(" << err <<"):\n";
				for(int i=0;i<4;i++)
				{
					std::cerr << "\t" << i%4 <<". FOR: [" << set[i%4].first[0] << "," <<set[i%4].first[1] << "] res: " <<
						q.solve(set[i%4].first)[0] << " should be " << set[i%4].second[0]<<"\n";
				}
			}
			if(err <0.001)
				break;
		}
	}
}
@@ -1,60 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/OpticalBackPropagation"

#include <iostream>
#include <vector>

class X: public Shin::Problem
{
	public:
		X(const X& a) :Problem(a) {}
		X(const std::vector<float> &a):Problem() {data=a;}
};

int main()
{
	srand(time(NULL));
	for (int test=0;test<2;test++)
	{
		Shin::NeuralNetwork::FeedForward q({2,40,1});
		Shin::NeuralNetwork::Learning::OpticalBackPropagation b(q);
		b.setLearningCoeficient(0.1);

		std::vector<std::pair<Shin::Problem, Shin::Solution> > set;
		set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,0}),Shin::Solution({0})));
		set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,0}),Shin::Solution({1})));
		set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,1}),Shin::Solution({0})));
		set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,1}),Shin::Solution({1})));
		if(test)
		{
			std::cerr << "Testing with entropy\n";
			b.allowNoise();
		}else
		{
			std::cerr << "Testing without entropy\n";
		}
		for(int j=0;;j++)
		{
			double err=b.teachSet(set);
			if(err <0.3)
			{
				// b.setLearningCoeficient(5);
			}
			if(err <0.1)
			{
				// b.setLearningCoeficient(0.2);
			}
			if(err <0.001)
			{
				std::cerr << j << "(" << err <<"):\n";
				for(int i=0;i<4;i++)
				{
					std::cerr << "\t" << i%4 <<". FOR: [" << set[i%4].first[0] << "," <<set[i%4].first[1] << "] res: " <<
						q.solve(set[i%4].first)[0] << " should be " << set[i%4].second[0]<<"\n";
				}
			}
			if(err <0.001)
				break;
		}
	}
}
@@ -1,344 +0,0 @@
#include <allegro.h>
#include <cstdlib>
#include <time.h>
#include "../src/NeuronNetwork/Learning/QLearning.h"
#include <sys/time.h>

int learningGames=6000;

int ball_x = 320;
int ball_y = 240;

int ball_tempX = 320;
int ball_tempY = 240;

int p1_x = 20;
int p1_y = 210;

int p1_tempX = 20;
int p1_tempY = 210;

int p2_x = 620;
int p2_y = 210;

int p2_tempX = 620;
int p2_tempY = 210;

int i=0;

long game=0;
int q=0;
int speed=1;

bool randomLearner=0;

int dir; // This will keep track of the circle's direction
// 1 = up and left, 2 = down and left, 3 = up and right, 4 = down and right

BITMAP *buffer; // This will be our temporary bitmap for double buffering

class X: public Shin::NeuronNetwork::Problem
{
	public:
		X(int p1,int ballX,int ballY,int p2)//, int ballY)
		{
			data.push_back((float)p1/480.0);
			data.push_back((float)ballX/640.0);
			data.push_back((float)ballY/480.0);
		}
};

Shin::NeuronNetwork::Learning::QLearning l(3,15,3);

std::vector <std::pair<Shin::NeuronNetwork::Problem,int>> p1x;

void propagateOKtoP1(double quality=10)
{
	l.learnDelayed(p1x,quality);
	p1x.clear();
}

void moveBall(){

	ball_tempX = ball_x;
	ball_tempY = ball_y;

	if (dir == 1 && ball_x > 5 && ball_y > 5){

		if( ball_x == p1_x + 15 && ball_y >= p1_y && ball_y <= p1_y + 60){
			dir = rand()% 2 + 3;
			propagateOKtoP1(100);
		}else{
			--ball_x;
			--ball_y;
		}

	} else if (dir == 2 && ball_x > 5 && ball_y < 475){

		if( ball_x == p1_x + 15 && ball_y >= p1_y && ball_y <= p1_y + 60){
			dir = rand()% 2 + 3;
			propagateOKtoP1(100);
		}else{
			--ball_x;
			++ball_y;
		}

	} else if (dir == 3 && ball_x < 635 && ball_y > 5){

		if( ball_x + 5 == p2_x && ball_y >= p2_y && ball_y <= p2_y + 60){
			dir = rand()% 2 + 1;
		}else{
			++ball_x;
			--ball_y;
		}

	} else if (dir == 4 && ball_x < 635 && ball_y < 475){

		if( ball_x + 5 == p2_x && ball_y >= p2_y && ball_y <= p2_y + 60){
			dir = rand()% 2 + 1;
		}else{
			++ball_x;
			++ball_y;
		}

	} else {

		if (dir == 1 || dir == 3) ++dir;
		else if (dir == 2 || dir == 4) --dir;

	}

}

char p1Move(){

	X p=X(p1_y,ball_x,ball_y,p2_y);

	if(game <learningGames)
	{
		if(randomLearner)
		{
			int tmp=game%3;
			if(rand()%5==0)
			{
				tmp=(tmp+rand())%3;
			}
			if(tmp==1)
			{
				p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,2));//,ball_tempX,ball_tempY));
				return 1;
			}else if(tmp==0)
			{
				p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,0));//,ball_tempX,ball_tempY));
				return -1;
			}else
			{
				p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,1));//,ball_tempX,ball_tempY));
				return 0;
			}
		}else
		{
			if( p1_tempY > ball_y && p1_y > 0){
				p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,0));//,ball_tempX,ball_tempY));
				return -1;
			} else if( p1_tempY < ball_y && p1_y < 420){
				p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,2));//,ball_tempX,ball_tempY));
				return 1;
			}else
			{
				p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,1));//,ball_tempX,ball_tempY));
				return 0;
			}
		}
	}
	int j=l.getChoice(p);

	p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,j));//,ball_tempX,ball_tempY));

	return j-1;
}

char p2Move(){
	if(game >= learningGames)
	{
		if(key[KEY_UP])
			return 1;
		else if( key[KEY_DOWN])
			return -1;
		else
			return 0;
	}else
	{
		if(rand()%10==0)
		{
			return (rand()%3)-1;
		}
		if( p2_tempY > ball_y){
			return -1;
		} else if( p2_tempY < ball_y){
			return 1;
		}
		return 0;
	}
}

void startNew(){

	clear_keybuf();
	if(game==learningGames)
		textout_ex( screen, font, "Player 1 learned! Push a button to start a game.", 160, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));

	if(game >= learningGames)
		readkey();

	clear_to_color( buffer, makecol( 0, 0, 0));
	ball_x = 350;
	ball_y = rand()%481;

	p1_x = 20;
	p1_y = 210;

	p2_x = 620;
	p2_y = 210;

}


void checkWin(){

	int won=0;
	if ( ball_x < p1_x){
		won=1;
		game++;
		textout_ex( screen, font, "Player 2 Wins!", 320, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
		propagateOKtoP1(-100);
		startNew();

	} else if ( ball_x > p2_x){
		game++;
		won=1;
		textout_ex( screen, font, "Player 1 Wins!", 320, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
		propagateOKtoP1(100);
		startNew();
	}

}

void setupGame(){

	acquire_screen();
	rectfill( buffer, p1_x, p1_y, p1_x + 10, p1_y + 60, makecol ( 0, 0, 255));
	rectfill( buffer, p2_x, p2_y, p2_x + 10, p2_y + 60, makecol ( 0, 0, 255));
	circlefill ( buffer, ball_x, ball_y, 5, makecol( 128, 255, 0));
	draw_sprite( screen, buffer, 0, 0);
	release_screen();
	srand( time(NULL));
	dir = rand() % 4 + 1;

}


int main(int argc, char**argv)
{
	allegro_init();
	install_keyboard();
	set_color_depth(16);
	set_gfx_mode( GFX_AUTODETECT_WINDOWED, 640, 480, 0, 0);

	l.setLearningCoeficient(0.01,0.01);
	if(argc>=4 && argv[3][0]=='o')
	{
		std::cerr << "USING Optical Backpropagation\n";
		l.opticalBackPropagation();
	}
	if(argc>=3)
	{
		std::cerr << "Setting learning coefficients to:" << atof(argv[1]) << "," << atof(argv[2]) << "\n";
		l.setLearningCoeficient(atof(argv[1]),atof(argv[2]));
	}
	if(argc >=5)
	{
		std::cerr << "Setting learning games to:" << atof(argv[4]) << "\n";
		learningGames=atof(argv[4]);
	}
	if(argc >=6 && argv[5][0]=='r')
	{
		std::cerr << "Setting random learning\n";
		randomLearner=1;
	}
	buffer = create_bitmap( 640, 480);
	setupGame();
	speed=51;
	int sleepTime=1000;
	while(!key[KEY_ESC])
	{
		q++;
		if(key[KEY_T])
		{
			std::cout << "ADDING next 500 learning games\n";
			usleep(500000);
			learningGames+=500;
		}
		if(game < learningGames)
		{
			if( key[KEY_UP] && speed < 200){
				speed+=5;
			}else if( key[KEY_DOWN] && speed >1 ){
				speed-=5;
			}
			if(speed <= 0)
			{
				speed=1;
			}
		}else
		{
			speed=1;
		}

		char p1dir=p1Move();
		char p2dir=p2Move();

		p1_tempY = p1_y;
		p2_tempY = p2_y;

		if(p1dir < 0 && p1_y > 0){
			--p1_y;
		} else if( p1dir > 0 && p1_y < 420){
			++p1_y;
		}
		if(p2dir > 0 && p2_y > 0){
			--p2_y;
		} else if( p2dir < 0 && p2_y < 420){
			++p2_y;
		}
		moveBall();
		if(key[KEY_PLUS_PAD] && sleepTime >=10)
			sleepTime-=50;
		else if(key[KEY_MINUS_PAD] && sleepTime <=15000)
			sleepTime+=50;

		if(i%speed==0)
		{
			acquire_screen();
			rectfill( buffer, p1_tempX, p1_tempY, p1_tempX + 10, p1_tempY + 60, makecol ( 0, 0, 0));
			rectfill( buffer, p1_x, p1_y, p1_x + 10, p1_y + 60, makecol ( 0, 0, 255));

			rectfill( buffer, p2_tempX, p2_tempY, p2_tempX + 10, p2_tempY + 60, makecol ( 0, 0, 0));
			rectfill( buffer, p2_x, p2_y, p2_x + 10, p2_y + 60, makecol ( 0, 0, 255));

			circlefill ( buffer, ball_tempX, ball_tempY, 5, makecol( 0, 0, 0));
			circlefill ( buffer, ball_x, ball_y, 5, makecol( 128, 255, 0));
			draw_sprite( screen, buffer, 0, 0);
			release_screen();
			usleep(sleepTime);
		}
		checkWin();
		i++;
	}

	return 0;

}

END_OF_MAIN()
@@ -1,95 +0,0 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement.h"
#include "../src/NeuronNetwork/Solution.h"

#include <iostream>
#include <vector>

class X: public Shin::NeuronNetwork::Problem
{
	public:
		X(const X& a) :q(a.q) {}
		X(const std::vector<float> &a):q(a) {}
		std::vector<float> representation() const
		{
			return q;
		}
	protected:
		std::vector<float> q;
};

int main()
{
	srand(time(NULL));

	std::vector<X> p;

	p.push_back(X(std::vector<float>({0,0})));

	p.push_back(X(std::vector<float>({1,1})));

	Shin::NeuronNetwork::FeedForward q({2,6,2});
	Shin::NeuronNetwork::Learning::Reinforcement b(q);
	b.getPropagator().setLearningCoeficient(1);
	int i=0;
	b.setQualityFunction(
		[&i](const Shin::NeuronNetwork::Problem &,const Shin::NeuronNetwork::Solution &s)->float
		{
			if(i%2==0)
			{
				// we expect 1
				int e=(s[0]-0.80)*15.0;//+(abs(s[1])-0.5)*100.0;
				return e;
			}else
			{
				// we expect 0
				int e=(0.20-s[0])*15.0;//+(0.4-abs(s[1]))*100.0;
				return e;
			}
			return 1.0;
		});
	for(i=0;i < 500000000;i++)
	{
		if(i==75000)
		{
			std::cerr << "SSSSSS1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n";
			b.setCoef(1);
		}
		if(i==150000)
		{
			std::cerr << "SSSSSS1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n";
			b.setCoef(0.51);
		}
		if(i==300000)
		{
			std::cerr << "SSSSSS2XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n";
			b.setCoef(0.15);
		}
		b.learn(p[i%2]);

		if(i%100000==0)
			srand(time(NULL));
		if(i%10000==0)
			for(int j=0;j<2;j++)
			{
				std::cerr << j%4 <<". FOR: [" << p[j%4].representation()[0] << "," <<p[j%4].representation()[0] << "] res: " << q.solve(p[j%4])[0] << "\n";
			}
	}

	/* int i=0;
	std::cerr << i%4 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";

	for(int i=0;i<2000;i++)sa
	{
		b.teach(p[i%2],s[i%2]);
		std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
	}
	b.debugOn();
	for(int i=0;i<2;i++)
	{
		b.teach(p[i%2],s[i%2]);
		std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[0] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
			s[i%4][0]<<"\n";
	}
	b.debugOff();*/
}
@@ -1,82 +0,0 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement.h"
#include "../src/NeuronNetwork/Solution.h"

#include <iostream>
#include <vector>

class X: public Shin::NeuronNetwork::Problem
{
	public:
		X(const X& a) :Problem(a) {}
		X(const std::vector<float> &a):Problem() {data=a;}
};

int main()
{
	srand(time(NULL));

	std::vector<Shin::NeuronNetwork::Problem*> p;

	p.push_back(new X(std::vector<float>({0,0})));

	p.push_back(new X(std::vector<float>({1,1})));

	p.push_back(new X(std::vector<float>({1,0})));
	p.push_back(new X(std::vector<float>({0,1})));

	Shin::NeuronNetwork::FeedForward q({2,1});
	Shin::NeuronNetwork::Learning::Reinforcement b(q);
	int i=0;
	double targetQuality=0.5;
	b.setQualityFunction(
		[](const Shin::NeuronNetwork::Problem &pr,const Shin::NeuronNetwork::Solution &s)->float
		{
			if(pr[0]==1 && pr[1]==1)
			{
				// we expect 1
				int e=(s[0]-0.80)*15.0;//+(abs(s[1])-0.5)*100.0;
				return e;
			}else
			{
				// we expect 0
				int e=(0.20-s[0])*15.0;//+(0.4-abs(s[1]))*100.0;
				return e;
			}
			return 1.0;
		});
	for(i=0;i < 500000000;i++)
	{
		double err=b.learnSet(p);

		if(i%100000==0)
			srand(time(NULL));
		if(err > targetQuality||i%1000==0)
		{
			std::cerr << i << " ("<< err <<").\n";
			for(int j=0;j<4;j++)
			{
				std::cerr << j%4 <<". FOR: [" << p[j%4]->operator[](0) << "," <<p[j%4]->operator[](0) << "] res: " << q.solve(*p[j%4])[0] << "\n";
			}
		}
		if(err >targetQuality)
			break;
	}

	/* int i=0;
	std::cerr << i%4 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";

	for(int i=0;i<2000;i++)sa
	{
		b.teach(p[i%2],s[i%2]);
		std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
	}
	b.debugOn();
	for(int i=0;i<2;i++)
	{
		b.teach(p[i%2],s[i%2]);
		std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[0] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
			s[i%4][0]<<"\n";
	}
	b.debugOff();*/
}
@@ -1,99 +0,0 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement"
#include "../src/NeuronNetwork/Learning/OpticalBackPropagation"

#include <iostream>
#include <vector>


class X: public Shin::NeuronNetwork::Problem
{
	public:
		X(const X& a) :Problem(a) {}
		X(const std::vector<float> &a):Problem() {data=a;}
};


int main()
{
	srand(time(NULL));
	for (int test=0;test<3;test++)
	{
		Shin::NeuronNetwork::FeedForward q({2,4,1});
		Shin::NeuronNetwork::Learning::Reinforcement b(q);
		//b.setPropagator(new Shin::NeuronNetwork::Learning::OpticalBackPropagation(q));
		b.getPropagator().setLearningCoeficient(0.4);
		//b.getPropagator().allowEntropy();
		double targetQuality =2.9;
		if(test==2)
		{
			targetQuality =1.62;
			std::cerr << "Testing with OBP ...\n";

			b.setPropagator(new Shin::NeuronNetwork::Learning::OpticalBackPropagation(q));
			b.getPropagator().setLearningCoeficient(0.5);
		}
		b.setQualityFunction(
			[](const Shin::NeuronNetwork::Problem &p,const Shin::NeuronNetwork::Solution &s)->float
			{
				float expect=0.0;
				if(p[0] && p[1])
					expect=0;
				else if(p[0] && !p[1])
					expect=1;
				else if(!p[0] && !p[1])
					expect=0;
				else if(!p[0] && p[1])
					expect=1;

				// std::cerr << "expected: " << expect << " got " << s[0];

				if(expect==0)
				{
					expect=0.3-abs(s[0]);
				}else
				{
					expect=s[0]-0.7;
				}

				// std::cerr << " returning " << expect*5.0 << "\n";

				return expect*19.0;
			});

		std::vector<Shin::NeuronNetwork::Problem*> p;

		p.push_back(new X(std::vector<float>({0,0})));
		p.push_back( new X(std::vector<float>({1,0})));
		p.push_back( new X(std::vector<float>({0,1})));
		p.push_back(new X(std::vector<float>({1,1})));

		if(test==1)
		{
			std::cerr << "Testing with entropy ...\n";
			b.getPropagator().allowNoise();
		}else
		{
			std::cerr << "Testing without entropy ...\n";
		}

		for(int i=0;i < 500000000;i++)
		// for(int i=0;i < 5;i++)
		{
			double err=b.learnSet(p);
			if(i%100000==0)
				srand(time(NULL));
			if(i%200000==0 || err > targetQuality)
			{
				std::cerr << i << " ("<< err <<").\n";
				for(int j=0;j<4;j++)
				{
					std::cerr << "\t" << i%4 <<". FOR: [" << p[j%4]->operator[](0) << "," <<p[j%4]->operator[](1) << "] res: " <<
						q.solve(*p[j%4])[0] << "\n";
				}
			}
			if(err >targetQuality)
				break;
		}
	}
}
@@ -1,99 +0,0 @@
#include "../src/NeuronNetwork/Learning/QLearning.h"

#include <iostream>
#include <vector>


class X: public Shin::NeuronNetwork::Problem
{
	public:
		X(const X& a) :Problem(a) {}
		X(const std::vector<float> &a):Problem() {data=a;}
};

float atof(char *s)
{
	int f, m, sign, d=1;
	f = m = 0;

	sign = (s[0] == '-') ? -1 : 1;
	if (s[0] == '-' || s[0] == '+') s++;

	for (; *s != '.' && *s; s++) {
		f = (*s-'0') + f*10;
	}
	if (*s == '.')
		for (++s; *s; s++) {
			m = (*s-'0') + m*10;
			d *= 10;
		}
	return sign*(f + (float)m/d);
}

float AA=10;
float getQuality(X& p, int action)
{
	if((p[0]==0&& p[1]==0) ||(p[0]==1&& p[1]==1)) //should be 0
	{
		return action==1?-AA:AA;
	}else // should be 1
	{
		return action==0?-AA:AA;
	}
}

int main(int argc, char **argv)
{
	srand(time(NULL));

	Shin::NeuronNetwork::Learning::QLearning l(2,45,2);
	if(argc==4 && argv[3][0]=='o')
	{
		std::cerr << "USING Optical Backpropagation\n";
		l.opticalBackPropagation();
	}
	if(argc>=3)
	{
		std::cerr << "Setting learning coefficients to:" << atof(argv[1]) << "," << atof(argv[2]) << "\n";
		l.setLearningCoeficient(atof(argv[1]),atof(argv[2]));
	}
	std::vector <std::pair<Shin::NeuronNetwork::Solution,Shin::NeuronNetwork::Problem>> p1x;

	std::vector <X> states;
	states.push_back(X(std::vector<float>({1,0})));
	states.push_back(X(std::vector<float>({0,0})));
	states.push_back(X(std::vector<float>({1,1})));
	states.push_back(X(std::vector<float>({0,1})));

	unsigned long step=0;
	double quality=0;
	while(step< 600000 && quality < (3.9*AA))
	{
		quality=0;
		if(step%10000==0)
			std::cerr << "STEP " << step << "\n";
		for(unsigned i=0;i<states.size();i++)
		{
			int choice=l.getChoice(states[i]);
			l.learn(states[i],choice,quality);
		}
		for(unsigned i=0;i<states.size();i++)
		{
			int choice=l.getChoice(states[i]);
			quality+=getQuality(states[i],choice);
			if(step%10000==0)
			{
				Shin::NeuronNetwork::Solution sol=l.getSolution(states[i]);
				std::cerr << "\tState: [" << states[i][0] << "," << states[i][1] << "] Q-function: [" << sol[0] << "," <<sol[1] << "] Action " << choice << "\n";
			}
		}
		step++;
	}
	std::cerr << step << "\n";
	for(unsigned i=0;i<states.size();i++)
	{
		Shin::NeuronNetwork::Solution sol=l.getSolution(states[i]);
		int choice=l.getChoice(states[i]);
		std::cerr << "State: [" << states[i][0] << "," << states[i][1] << "] Q-function: [" << sol[0] << "," <<sol[1] << "] Action " << choice << "\n";
	}
}
@@ -1,49 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

//typedef Shin::NeuronNetwork::Problem X;

class X: public Shin::Problem
{
	public:
		X(const X& a) :Problem(a) {}
		X(const std::vector<float> &a):Problem() {for(auto q:a){ data.push_back(q);}}
	protected:
};
int main(int argc,char**)
{
	srand(time(NULL));
	std::vector<Shin::Solution> s;
	std::vector<X> p;

	p.push_back(X(std::vector<float>({0,0})));
	s.push_back(Shin::Solution(std::vector<float>({0.4,0.3,0.2,0.1})));
	p.push_back(X(std::vector<float>({0,0.5})));
	s.push_back(Shin::Solution(std::vector<float>({0.6,0.3,0.2,0.5})));
	p.push_back(X(std::vector<float>({0.4,0.5})));
	s.push_back(Shin::Solution(std::vector<float>({0.4,0.4,0.2,0.8})));
	Shin::NeuralNetwork::FeedForward q({2,4,4,4},1.0);
	Shin::NeuralNetwork::Learning::BackPropagation bp(q);
	bp.setLearningCoeficient(0.2);
	for(int i=0;i<3;i++)
	{
		Shin::Solution sp =q.solve(p[i]);
		std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
	}
	for(int i=0;i<4;i++)
	{
		for(int j=0;j<3;j++)
		{
			bp.teach(p[j],s[j]);
		}
	}
	std::cerr << "XXXXXXXXXXXX\n";
	for(int i=0;i<3;i++)
	{
		Shin::Solution sp =q.solve(p[i]);
		std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
	}
}
30
tests/recurrent.cpp
Normal file
@@ -0,0 +1,30 @@
#include <NeuralNetwork/Recurrent/Network.h>

#include <iostream>
int main() {
	NeuralNetwork::Recurrent::Network a(2,1,1);
/*	a.getNeurons()[3].setWeight(a.getNeurons()[2],0.00000001565598595);
	a.getNeurons()[2].setWeight(a.getNeurons()[3],0.00000001565598595);
	a.getNeurons()[3].setWeight(a.getNeurons()[1],0.00000001565598595);
	a.getNeurons()[3].setWeight(a.getNeurons()[0],0.00000001565598595);

	a.computeOutput({0.5,0});

	std::cout << a;

	NeuralNetwork::Recurrent::Network b(a.stringify());
*/

	a.getNeurons()[3].setWeight(a.getNeurons()[0],0.05);
	a.getNeurons()[3].setWeight(a.getNeurons()[1],0.05);
	a.getNeurons()[3].setWeight(a.getNeurons()[2],0.7);
	a.getNeurons()[2].setWeight(a.getNeurons()[3],0.1);

	std::cout << a;

	for(int i=0;i<40;i++) {
		std::cout << a.computeOutput({1,0.7})[0] << "\n";
	}
	std::cout << a;

}