code cleaning and a mistake fix in HyperbolicTangent

@@ -21,7 +21,7 @@ FFNeuron& FFLayer::operator[](const size_t& neuron)
     neurons=new FFNeuron*[layerSize];
     for(size_t i=0;i<layerSize;i++)
     {
-        neurons[i]=new FFNeuron(potentials[i],weights[i],sums[i],inputs[i],lambda);
+        neurons[i]=new FFNeuron(potentials[i],weights[i],outputs[i],inputs[i],lambda,function);
     }
 }

@@ -39,10 +39,10 @@ FeedForward::FeedForward(std::initializer_list<size_t> s, double lam): ACyclicNe
     weights= new float**[s.size()];
     potentials= new float*[s.size()];
     layerSizes= new size_t[s.size()];
-    sums= new float*[s.size()];
+    outputs= new float*[s.size()];
     inputs= new float*[s.size()];
     int i=0;
-    int prev_size=1;
+    register int prev_size=1;
     for(int layeSize:s) // TODO rename
     {
         transfer[i]= new TransferFunction::Sigmoid(lam);
@@ -54,11 +54,11 @@ FeedForward::FeedForward(std::initializer_list<size_t> s, double lam): ACyclicNe
         layerSizes[i]=layeSize;
         weights[i]= new float*[layeSize];
         potentials[i]= new float[layeSize];
-        sums[i]= new float[layeSize];
+        outputs[i]= new float[layeSize];
         inputs[i]= new float[layeSize];

         potentials[i][0]=1.0;
-        sums[i][0]=1.0;
+        outputs[i][0]=1.0;
         for (int j=1;j<layeSize;j++)
         {
             potentials[i][j]=1.0;
@@ -84,13 +84,13 @@ FeedForward::~FeedForward()
             }
             delete[] weights[i];
             delete[] potentials[i];
-            delete[] sums[i];
+            delete[] outputs[i];
             delete[] inputs[i];
         }
         delete[] weights;
         delete[] potentials;
         delete[] layerSizes;
-        delete[] sums;
+        delete[] outputs;
         delete[] inputs;
     }
     if(ffLayers !=nullptr)
@@ -156,15 +156,15 @@ void FeedForward::solvePart(float *newSolution, register size_t begin, size_t en
             {
                 tmp+=sol[k]*weights[layer][j][k];
             }
-            newSolution[j]=transfer[layer]->operator()(tmp);
             inputs[layer][j]=tmp;
+            newSolution[j]=transfer[layer]->operator()(tmp);
         }
     }
 }

 Shin::Solution FeedForward::solve(const Shin::Problem& p)
 {
-    register float* sol=sums[0];
+    register float* sol=outputs[0];

     sol[0]=1;
     for(size_t i=0;i<p.size();i++)
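
For orientation: per neuron j, solvePart accumulates the weighted sum of the previous layer's outputs, and after this commit it stores that raw sum in inputs[layer][j] before writing the activated value into outputs[layer][j] (index 0 of every layer is a bias unit pinned to 1, hence sol[0]=1). A standalone sketch of that per-neuron step; the function name is made up here, and a fixed tanh stands in for the polymorphic transfer[layer] call:

    #include <cmath>
    #include <cstddef>

    // Sketch of the per-neuron forward step, under the commit's conventions:
    // index 0 is the bias unit, inputs[] keeps the raw weighted sum (needed
    // later for derivatives), outputs[] keeps the activated value.
    void forwardLayer(const float *sol, std::size_t prevSize,
                      float **w, float *inputs, float *outputs, std::size_t size)
    {
        outputs[0] = 1.0f;                      // bias unit, mirrors sol[0]=1
        for (std::size_t j = 1; j < size; j++)
        {
            float tmp = 0.0f;
            for (std::size_t k = 0; k < prevSize; k++)
                tmp += sol[k] * w[j][k];        // sol[0]==1 folds the bias in
            inputs[j]  = tmp;                   // pre-activation, saved first...
            outputs[j] = std::tanh(tmp);        // ...then the transfer output
        }
    }
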
@@ -173,7 +173,7 @@ Shin::Solution FeedForward::solve(const Shin::Problem& p)
     register size_t prevSize=layerSizes[0];
     for(register size_t i=1;i<layers;i++)
     {
-        float* newSolution= sums[i];
+        float* newSolution= outputs[i];
         if(threads > 1 && (layerSizes[i] > 700 ||prevSize > 700)) // 700 is an guess about actual size, when creating thread has some speedup
         {
             std::vector<std::thread> th;
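
The thread branch is only entered for wide layers; the 700-neuron cutoff is the author's stated guess at where thread-creation overhead starts to pay off. A standalone sketch of the range-splitting pattern around solvePart; the helper name and chunking details are assumptions, not the repository's exact code:

    #include <cstddef>
    #include <thread>
    #include <vector>

    // Split [1, size) across `threads` workers and join them; index 0 is
    // skipped because it is the bias unit. `work` plays the role of a bound
    // solvePart(newSolution, begin, end) call. Assumes threads >= 1.
    template <typename Fn>
    void splitAcrossThreads(std::size_t size, unsigned threads, Fn work)
    {
        std::vector<std::thread> th;
        std::size_t chunk = (size - 1) / threads;
        std::size_t begin = 1;
        for (unsigned t = 0; t < threads; t++)
        {
            std::size_t end = (t + 1 == threads) ? size : begin + chunk;
            th.emplace_back(work, begin, end);  // one worker per subrange
            begin = end;
        }
        for (std::thread &w : th)
            w.join();
    }
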
@@ -211,7 +211,7 @@ FFLayer& FeedForward::operator[](const size_t& l)
     ffLayers=new FFLayer*[layers];
     for(size_t i=0;i<layers;i++)
     {
-        ffLayers[i]=new FFLayer(layerSizes[i],potentials[i],weights[i],sums[i],inputs[i],lambda);
+        ffLayers[i]=new FFLayer(layerSizes[i],potentials[i],weights[i],outputs[i],inputs[i],lambda,*transfer[i]);
     }
 }

@@ -6,6 +6,7 @@
 #include "Network"
 #include "TransferFunction/Sigmoid.h"
 #include "TransferFunction/TransferFunction.h"
+#include "TransferFunction/HyperbolicTangent.h"

 #include <vector>
 #include <initializer_list>
@@ -30,25 +31,26 @@ namespace NeuralNetwork
     class FFNeuron : public Neuron
     {
         public:
+            inline FFNeuron(float &pot, float *w, float &outputF, float &i,float lam,TransferFunction::TransferFunction &fun):function(fun),potential(pot),weights(w),out(outputF),inputs(i),lambda(lam) { }

             FFNeuron() = delete;
             FFNeuron(const FFNeuron&) = delete;
             FFNeuron& operator=(const FFNeuron&) = delete;

-            FFNeuron(float &pot, float *w, float &s, float &i,float lam):potential(pot),weights(w),sum(s),inputs(i),lambda(lam) { }

             inline virtual float getPotential() const override {return potential;}
             inline virtual void setPotential(const float& p) override { potential=p;}

             inline virtual float getWeight(const size_t& i ) const override { return weights[i];}
             inline virtual void setWeight(const size_t& i,const float &p) override { weights[i]=p; }

-            inline virtual float output() const override { return sum; }
+            inline virtual float output() const override { return out; }
             inline virtual float input() const override { return inputs; }
-            inline virtual float derivatedOutput() const override { return lambda*output()*(1.0-output()); }
+            inline virtual float derivatedOutput() const override { return function.derivatedOutput(inputs,out); }
         protected:
+            TransferFunction::TransferFunction &function;
             float &potential;
             float *weights;
-            float &sum;
+            float &out;
             float &inputs;
             float lambda;
         private:
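
Routing derivatedOutput through the TransferFunction reference is the conceptual core of the commit: the old code hard-wired the sigmoid derivative lambda*output*(1-output) into FFNeuron, which silently broke any network built with a different activation. Each transfer function can now express its derivative from the neuron's stored input/output pair. A standalone sketch of the two derivatives involved (namespace and function names are illustrative):

    #include <cmath>

    namespace Sketch
    {
        // Sigmoid s(x) = 1/(1+exp(-lambda*x)) gives s'(x) = lambda*s(x)*(1-s(x)),
        // exactly the expression FFNeuron::derivatedOutput used to hard-code.
        inline float sigmoidDerivative(float lambda, float /*input*/, float output)
        {
            return lambda * output * (1.0f - output);
        }

        // tanh'(x) = 1 - tanh(x)^2, i.e. computable from the output alone.
        inline float tanhDerivative(float /*input*/, float output)
        {
            return 1.0f - output * output;
        }
    }
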
@@ -57,7 +59,7 @@ namespace NeuralNetwork
     class FFLayer: public Layer
     {
        public:
-            FFLayer(size_t s, float *p,float **w,float *su,float *in,float lam): layerSize(s),potentials(p),weights(w),sums(su),inputs(in),lambda(lam) {}
+            inline FFLayer(size_t s, float *p,float **w,float *out,float *in,float lam,TransferFunction::TransferFunction &fun): function(fun), layerSize(s),potentials(p),weights(w),outputs(out),inputs(in),lambda(lam) {}
             ~FFLayer();

             FFLayer(const FFLayer &) = delete;
@@ -66,11 +68,12 @@ namespace NeuralNetwork
             virtual FFNeuron& operator[](const size_t& layer) override;
             inline virtual size_t size() const override {return layerSize;};
         protected:
+            TransferFunction::TransferFunction &function;
             FFNeuron **neurons=nullptr;
             size_t layerSize;
             float *potentials;
             float **weights;
-            float *sums;
+            float *outputs;
             float *inputs;
             float lambda;
     };
@@ -93,7 +96,7 @@ namespace NeuralNetwork
             FFLayer **ffLayers=nullptr;
             float ***weights=nullptr;
             float **potentials=nullptr;
-            float **sums=nullptr;
+            float **outputs=nullptr;
             float **inputs=nullptr;
             TransferFunction::TransferFunction **transfer=nullptr;
             size_t *layerSizes=nullptr;

@@ -1,5 +1,5 @@
-#ifndef __TRAN_SIGMOID_H_
-#define __TRAN_SIGMOID_H_
+#ifndef __TRAN_HYPTAN_H_
+#define __TRAN_HYPTAN_H_

 #include "./TransferFunction.h"

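The guard rename is not cosmetic: HyperbolicTangent.h had evidently been copied from Sigmoid.h and still reused the __TRAN_SIGMOID_H_ guard, so a translation unit including both headers would silently compile out whichever came second. A single-file sketch of that failure mode:

    #include <cstdio>

    // Stands in for: #include "Sigmoid.h" having defined its guard already.
    #define __TRAN_SIGMOID_H_

    // Old HyperbolicTangent.h body, guarded by the *same* macro:
    #ifndef __TRAN_SIGMOID_H_
    #define __TRAN_SIGMOID_H_
    #define HYPTAN_BODY_COMPILED 1
    #endif

    int main()
    {
    #ifdef HYPTAN_BODY_COMPILED
        std::puts("HyperbolicTangent body compiled");
    #else
        std::puts("HyperbolicTangent body skipped: guard collision");
    #endif
        return 0;
    }
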
@@ -13,7 +13,7 @@ namespace TransferFunction
     {
         public:
             HyperbolicTangent() {}
-            inline virtual float derivatedOutput(const float&,const float &output) override { return 1-pow(output); }
+            inline virtual float derivatedOutput(const float&,const float &output) override { return 1-pow(output,2); }
             inline virtual float operator()(const float &x) override { return tanh(x); };
         protected:
     };
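
This is the mistake named in the commit title: 1-pow(output) is not a valid call, since <cmath>'s pow takes two arguments, and the intended derivative is tanh'(x) = 1 - tanh(x)^2, computable directly from the stored output as 1-pow(output,2). A quick standalone check that the fixed expression agrees with a numerical derivative (illustrative test code, not part of the repository):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float x = 0.7f, h = 1e-3f;
        float output   = std::tanh(x);
        float analytic = 1.0f - std::pow(output, 2.0f);             // the fixed formula
        float numeric  = (std::tanh(x + h) - std::tanh(x - h)) / (2.0f * h);
        std::printf("analytic=%f numeric=%f\n", analytic, numeric); // ~0.6347 both
        return 0;
    }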