new functions to support the LSTM unit

2016-01-27 23:40:32 +01:00
parent d424d87535
commit 3c26c9641c
7 changed files with 97 additions and 14 deletions

View File

@@ -0,0 +1,26 @@
#pragma once
#include "./ActivationFunction.h"
namespace NeuralNetwork {
namespace ActivationFunction {
class Linear: public ActivationFunction {
public:
Linear(const float &lambdaP=1.0): lambda(lambdaP) {}
inline virtual float derivatedOutput(const float &,const float &) override { return lambda; }
inline virtual float operator()(const float &x) override { return x*lambda; }
virtual ActivationFunction* clone() const override {
return new Linear(lambda);
}
virtual std::string stringify() const override {
return "{ \"class\": \"NeuralNetwork::ActivationFunction::Linear\", \"lamba\" : "+std::to_string(lambda)+"}";
}
protected:
float lambda;
};
}
}
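A minimal usage sketch of the new Linear activation (standalone; it mirrors the asserts the commit adds to the activation test further below):

    #include <NeuralNetwork/ActivationFunction/Linear.h>
    #include <cassert>

    int main() {
        NeuralNetwork::ActivationFunction::Linear identity;     // default lambda = 1.0
        assert(identity(0.5f) == 0.5f);                          // output = x
        NeuralNetwork::ActivationFunction::Linear scaled(0.7f); // output = 0.7 * x
        assert(scaled(0.0f) == 0.0f);
        assert(scaled.derivatedOutput(1.0f, 0.7f) == 0.7f);     // derivative is the constant lambda
        return 0;
    }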

View File

@@ -10,7 +10,7 @@ namespace BasisFunction {
class BasisFunction {
public:
virtual ~BasisFunction() {}
-virtual float operator()(const std::vector<float>& weights, const std::vector<float>& input)=0;
+virtual float operator()(const std::vector<float>& weights, const std::vector<float>& input) const =0;
/**
* @brief Function returns clone of object

View File

@@ -17,7 +17,7 @@ namespace BasisFunction {
public:
Linear() {}
-inline virtual float computeStreaming(const std::vector<float>& weights, const std::vector<float>& input) override {
+inline virtual float computeStreaming(const std::vector<float>& weights, const std::vector<float>& input) const override {
size_t inputSize=input.size();
size_t alignedPrev=inputSize-inputSize%4;
@@ -46,7 +46,7 @@ namespace BasisFunction {
return partialSolution.f[0];
}
-inline virtual float compute(const std::vector<float>& weights, const std::vector<float>& input) override {
+inline virtual float compute(const std::vector<float>& weights, const std::vector<float>& input) const override {
register float tmp = 0;
size_t inputSize=input.size();
for(size_t k=0;k<inputSize;k++) {

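The alignedPrev arithmetic above rounds the input size down to a multiple of four so the SSE path can consume four floats per step, leaving a scalar loop for the remainder. A plain scalar sketch of the same main-loop/tail split (illustrative, not the library's SSE code):

    #include <vector>
    #include <cstddef>

    float dot(const std::vector<float>& w, const std::vector<float>& x) {
        size_t n = x.size();
        size_t aligned = n - n % 4;              // largest multiple of 4 not above n
        float sum = 0.0f;
        for (size_t k = 0; k < aligned; k += 4)  // main loop: blocks of four
            sum += w[k]*x[k] + w[k+1]*x[k+1] + w[k+2]*x[k+2] + w[k+3]*x[k+3];
        for (size_t k = aligned; k < n; ++k)     // scalar tail: leftover elements
            sum += w[k]*x[k];
        return sum;
    }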
View File

@@ -0,0 +1,34 @@
#pragma once
#include "./BasisFunction.h"
namespace NeuralNetwork {
namespace BasisFunction {
class Product: public BasisFunction {
public:
Product() {}
/**
* @brief Computes the product of the inputs whose weight is > 0.5
*/
inline virtual float operator()(const std::vector<float>& weights, const std::vector<float>& input) const override {
float product=1.0;
for(size_t i=0;i<weights.size();i++) {
if(weights[i] > 0.5)
product=product*input[i];
}
return product;
}
virtual Product* clone() const override {
return new Product();
}
virtual std::string stringify() const override {
return "{ \"class\": \"NeuralNetwork::BasisFunction::Product\" }";
}
};
}
}
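A usage sketch (values illustrative): the weights act as a mask, so the unit multiplies together only the inputs whose weight exceeds 0.5 — the kind of multiplicative gating an LSTM unit applies to its cell state.

    #include <NeuralNetwork/BasisFunction/Product.h>
    #include <cassert>
    #include <vector>

    int main() {
        NeuralNetwork::BasisFunction::Product gate;
        std::vector<float> weights({0.0f, 1.0f, 1.0f}); // mask: input 0 is skipped
        std::vector<float> input({5.0f, 0.5f, 4.0f});
        float out = gate(weights, input);               // 0.5 * 4.0 = 2.0
        assert(out == 2.0f);
        return 0;
    }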

View File

@@ -13,11 +13,13 @@ namespace BasisFunction {
float f[4];
};
-virtual float operator()(const std::vector<float>& weights, const std::vector<float>& input) override {
+virtual float operator()(const std::vector<float>& weights, const std::vector<float>& input) const override {
return computeStreaming(weights,input);
}
-virtual float computeStreaming(const std::vector<float>& weights, const std::vector<float>& input) =0;
-virtual float compute(const std::vector<float>& weights, const std::vector<float>& input) =0;
+virtual float computeStreaming(const std::vector<float>& weights, const std::vector<float>& input) const =0;
+virtual float compute(const std::vector<float>& weights, const std::vector<float>& input) const =0;
};
}
}
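Const-qualifying operator(), compute, and computeStreaming lets callers evaluate a basis function through a const reference, which the old signatures forbade. A minimal sketch (the evaluate helper is hypothetical, and the base-class header path is assumed from the relative include in Product.h):

    #include <NeuralNetwork/BasisFunction/BasisFunction.h>
    #include <vector>

    // Hypothetical caller: holds the polymorphic basis function read-only.
    float evaluate(const NeuralNetwork::BasisFunction::BasisFunction &basis,
                   const std::vector<float> &weights,
                   const std::vector<float> &input) {
        return basis(weights, input); // only compiles now that operator() is const
    }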

View File

@@ -1,6 +1,7 @@
#include <NeuralNetwork/ActivationFunction/Heaviside.h>
#include <NeuralNetwork/ActivationFunction/Sigmoid.h>
#include <NeuralNetwork/ActivationFunction/HyperbolicTangent.h>
+#include <NeuralNetwork/ActivationFunction/Linear.h>
#include <iostream>
#include <cassert>
@@ -26,14 +27,14 @@ int main() {
{
NeuralNetwork::ActivationFunction::Sigmoid s(0.7);
-assert(s(0.1) > 0.517483);
-assert(s(0.1) < 0.51750);
+assert(s(0.1) > 0.482407);
+assert(s(0.1) < 0.482607);
-assert(s(10) > 0.998989);
-assert(s(10) < 0.999189);
+assert(s(10) > 0.000901051);
+assert(s(10) < 0.000921051);
}
{
-NeuralNetwork::ActivationFunction::Sigmoid s(5);
+NeuralNetwork::ActivationFunction::Sigmoid s(-5);
assert(s(0.1) > 0.622359);
assert(s(0.1) < 0.622559);
@@ -41,7 +42,7 @@ int main() {
assert(s(0.7) < 0.970788);
}
{
-NeuralNetwork::ActivationFunction::Sigmoid s(0.7);
+NeuralNetwork::ActivationFunction::Sigmoid s(-0.7);
U.a[0]=0.1;
U.a[1]=10;
U.v=s(U.v);
@@ -52,8 +53,20 @@ int main() {
assert(U.a[1] > 0.998989);
assert(U.a[1] < 0.999189);
}
+{
+NeuralNetwork::ActivationFunction::Linear s(1.0);
+assert(s(0.5) > 0.4999);
+assert(s(0.5) < 0.5001);
+assert(s(0.0) == 0.0);
+}
+{
+NeuralNetwork::ActivationFunction::Linear s(0.7);
+assert(s(0.0) == 0.0);
+assert(s(1.0) > 0.6999);
+assert(s(1.0) < 0.7001);
+}
std::cout << "OK" << std::endl;
return 0;
}
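The flipped test values are consistent with Sigmoid now computing 1/(1 + e^(lambda*x)) rather than 1/(1 + e^(-lambda*x)), which is also why the lambdas above are negated. A standalone check of the new bounds using plain <cmath> (an inference from the asserts, not taken from the library):

    #include <cmath>
    #include <cassert>

    int main() {
        // 1/(1+e^(0.7*0.1)) ~ 0.482508, inside the new (0.482407, 0.482607) bounds.
        double a = 1.0 / (1.0 + std::exp(0.7 * 0.1));
        assert(a > 0.482407 && a < 0.482607);
        // 1/(1+e^(0.7*10)) ~ 0.000911, inside (0.000901051, 0.000921051).
        double b = 1.0 / (1.0 + std::exp(0.7 * 10.0));
        assert(b > 0.000901051 && b < 0.000921051);
        return 0;
    }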

View File

@@ -1,4 +1,5 @@
#include <NeuralNetwork/BasisFunction/Linear.h>
+#include <NeuralNetwork/BasisFunction/Product.h>
#include <iostream>
#include <cassert>
@@ -33,6 +34,14 @@ int main() {
assert(220.0==l.computeStreaming(w,w));
assert(220.0==l.compute(w,w));
}
+{
+NeuralNetwork::BasisFunction::Product l;
+std::vector<float> w({0,0.501,1});
+std::vector<float> i({0,0.2,0.3});
+assert(l(w,i) > 0.05999);
+assert(l(w,i) < 0.06001);
+}
/*
std::vector<float> w;
std::vector<float> i;
@@ -61,6 +70,5 @@ int main() {
std::cout << "SSE :" << diff.count() << " s\n";
}
*/
std::cout <<"OK" << std::endl;
}
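One boundary worth noting: the mask comparison in Product is strict, so a weight of exactly 0.5 excludes its input — presumably why the test above uses 0.501. A standalone check:

    #include <NeuralNetwork/BasisFunction/Product.h>
    #include <cassert>
    #include <vector>

    int main() {
        NeuralNetwork::BasisFunction::Product p;
        std::vector<float> w({0.5f, 0.6f});   // 0.5 is not > 0.5: masked out
        std::vector<float> in({10.0f, 3.0f});
        assert(p(w, in) == 3.0f);             // only in[1] participates
        return 0;
    }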