initial cleaning
@@ -2,31 +2,219 @@
using namespace Shin::NeuronNetwork;

FeedForwardNetwork::~FeedForwardNetwork()
FFLayer::~FFLayer()
{
for(Layer *l:layers)
if(neurons!=nullptr)
{
delete l;
for(size_t i=0;i<layerSize;i++)
{
delete neurons[i];
}
delete[] neurons;
}
}

Solution FeedForwardNetwork::solve(const Problem& p)
FFNeuron& FFLayer::operator[](size_t neuron)
{
Solution s=Solution(p);
for (Layer *l:layers)
if(neurons==nullptr)
{
s=l->solve(s);
neurons=new FFNeuron*[layerSize];
for(size_t i=0;i<layerSize;i++)
{
neurons[i]=new FFNeuron(potentials[i],weights[i],sums[i],inputs[i],lambda);
}
}
return s;
return *neurons[neuron];
}

const Layer* FeedForwardNetwork::operator[](int layer)
FeedForward::FeedForward(std::initializer_list< int > s, double lam): ACyclicNetwork(lam),layers(s.size())
{
return layers[layer];
weights= new float**[s.size()];
potentials= new float*[s.size()];
layerSizes= new size_t[s.size()];
sums= new float*[s.size()+1];
inputs= new float*[s.size()];
int i=0;
int prev_size=1;
for(int layeSize:s) // TODO rename
{
layeSize+=1;
if(i==0)
{
prev_size=layeSize;
sums[0]= new float[layeSize];
sums[0][0]=1.0;
}
layerSizes[i]=layeSize;
weights[i]= new float*[layeSize];
potentials[i]= new float[layeSize];
sums[i+1]= new float[layeSize];
inputs[i]= new float[layeSize];
potentials[i][0]=1.0;
sums[i+1][0]=1.0;
for (int j=1;j<layeSize;j++)
{
potentials[i][j]=1.0;
weights[i][j]= new float[prev_size];
for(int k=0;k<prev_size;k++)
{
weights[i][j][k]=1.0-((float)(rand()%2001))/1000.0;
}
}
i++;
prev_size=layeSize;
}
}
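A note on the weight initialiser above: the expression 1.0-((float)(rand()%2001))/1000.0 draws each weight uniformly from the 2001-point grid {-1.000, -0.999, ..., +1.000}. A minimal standalone range check (my sketch, not code from this commit):

// Sanity check of the initialiser's range; assumes only <cstdlib>/<cstdio>.
#include <cstdlib>
#include <cstdio>

int main()
{
    float lo = 1.0f, hi = -1.0f;
    for (int n = 0; n < 100000; n++) {
        float w = 1.0 - ((float)(rand() % 2001)) / 1000.0;
        if (w < lo) lo = w;
        if (w > hi) hi = w;
    }
    std::printf("observed range [%.3f, %.3f]\n", lo, hi); // ~[-1.000, 1.000]
    return 0;
}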
void FeedForwardNetwork::addLayer(int neurons)
FeedForward::~FeedForward()
{
layers.push_back(new Layer(neurons));
if(weights != nullptr)
{
for(size_t i=0;i<layers;i++)
{
for (size_t j=1;j<layerSizes[i];j++)
{
delete[] weights[i][j];
}
delete[] weights[i];
delete[] potentials[i];
delete[] sums[i];
delete[] inputs[i];
}
delete[] sums[layers];
delete[] weights;
delete[] potentials;
delete[] layerSizes;
delete[] sums;
delete[] inputs;
}
if(ffLayers !=nullptr)
{
for(size_t i=0;i<layers;i++)
{
delete ffLayers[i];
}
delete[] ffLayers;
}
}

void FeedForward::solvePart(float *newSolution, register size_t begin, size_t end,size_t prevSize, float *sol,size_t layer)
{
if(prevSize >4)
{
__m128 partialSolution;
__m128 w;
__m128 sols;
__m128 temporaryConst1=_mm_set1_ps(1.0);
__m128 temporaryConstLambda=_mm_set1_ps(-lambda);
register size_t alignedPrev=prevSize>16?(prevSize-(prevSize%16)):0;
for( size_t j=begin;j<end;j++)
{
partialSolution= _mm_setzero_ps();
w=_mm_setzero_ps();
for(register size_t k=alignedPrev;k<prevSize;k++)
{
w = _mm_load_ss(this->weights[layer][j]+k);
sols = _mm_load_ss(sol+k);
w=_mm_mul_ps(w,sols);
partialSolution=_mm_add_ps(partialSolution,w);
// w=_mm_shuffle_ps(w,w,3*2^0+0*2^2+1*2^4+2*2^6);
// sols=_mm_shuffle_ps(sols,sols,3*2^0+0*2^2+1*2^4+2*2^6);
}
for(register size_t k=0;k<alignedPrev;k+=4)
{
w = _mm_load_ps(this->weights[layer][j]+k);
//_mm_prefetch((char*)this->weights[layer][j]+k+4,_MM_HINT_T0);
sols = _mm_load_ps(sol+k);
w=_mm_mul_ps(w,sols);
partialSolution=_mm_add_ps(partialSolution,w);
}
/* pre-SSE3 solution
__m128 temp = _mm_add_ps(_mm_movehl_ps(foo128, foo128), foo128);
float x;
_mm_store_ss(&x, _mm_add_ss(temp, _mm_shuffle_ps(temp, 1)));
*/
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
_mm_store_ss(inputs[layer]+j,partialSolution);
partialSolution=_mm_mul_ps(temporaryConstLambda,partialSolution); //-lambda*sol[k]
partialSolution=exp_ps(partialSolution); //exp(sols)
partialSolution= _mm_add_ps(partialSolution,temporaryConst1); //1+exp()
partialSolution= _mm_div_ps(temporaryConst1,partialSolution); //1/(1+exp())
_mm_store_ss(newSolution+j,partialSolution);
}
}else
{
for( size_t j=begin;j<end;j++)
{
register float tmp=0;
for(register size_t k=0;k<prevSize;k++)
{
tmp+=sol[k]*weights[layer][j][k];
}
newSolution[j]=(1.0/(1.0+exp(-lambda*tmp)));
inputs[layer][j]=tmp;
}
}
}
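A note on the reduction above: the pair of _mm_hadd_ps calls replaces the commented-out pre-SSE3 movehl/shuffle sequence; after two horizontal adds every lane of the register holds the full four-lane sum. A minimal illustration (my sketch, assuming an SSE3-capable target, compiled with e.g. -msse3):

#include <pmmintrin.h> // SSE3: _mm_hadd_ps
#include <cstdio>

static float hsum_sse3(__m128 v)
{
    v = _mm_hadd_ps(v, v); // {a+b, c+d, a+b, c+d}
    v = _mm_hadd_ps(v, v); // {a+b+c+d, ...} in all four lanes
    float r;
    _mm_store_ss(&r, v);
    return r;
}

int main()
{
    __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
    std::printf("%f\n", hsum_sse3(v)); // 10.000000
    return 0;
}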
Solution FeedForward::solve(const Problem& p)
{
register float* sol=sums[1];

sums[0][0]=1;
sol[0]=1;
for(size_t i=0;i<p.size();i++)
{
sums[0][i+1]=p[i];
sol[i+1]=p[i];
}

register size_t prevSize=layerSizes[0];
for(register size_t i=1;i<layers;i++)
{
float* newSolution= sums[i+1];
if(threads > 1 && (layerSizes[i] > 700 ||prevSize > 700)) // 700 is a rough guess at the layer size where spawning threads starts to pay off
{
std::vector<std::thread> th;
size_t s=1;
size_t step =layerSizes[i]/threads;
for(size_t t=1;t<=threads;t++)
{
//TODO is this check actually needed?
if(s>=layerSizes[i])
break;
th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{
solvePart(newSolution,from,to,prevSize,sol,i);
},s,t==threads?layerSizes[i]:s+step));//{}
s+=step;
}

for (auto& thr : th)
thr.join();
}else
{
solvePart(newSolution,1,layerSizes[i],prevSize,sol,i);
}
prevSize=layerSizes[i];
sol=newSolution;
}
Solution ret;
for(size_t i=1;i<prevSize;i++)
{
ret.push_back(sol[i]);
}
return ret;
}
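For reference, the thread split above divides the neuron range [1, layerSizes[i]) (index 0 is the bias slot) into chunks of step = layerSizes[i]/threads, with the last thread absorbing the rounding remainder. A standalone sketch of the same arithmetic (mine, not part of the commit):

#include <cstddef>
#include <cstdio>

int main()
{
    const std::size_t n = 1000, threads = 3;
    std::size_t step = n / threads; // 333
    std::size_t s = 1;              // neuron 0 is the bias, never recomputed
    for (std::size_t t = 1; t <= threads; t++) {
        if (s >= n)
            break; // more threads than work
        std::size_t to = (t == threads) ? n : s + step;
        std::printf("thread %zu: [%zu, %zu)\n", t, s, to);
        s += step;
    }
    return 0;
}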
FFLayer& FeedForward::operator[](size_t l)
{
if(ffLayers==nullptr)
{
ffLayers=new FFLayer*[layers];
for(size_t i=0;i<layers;i++)
{
ffLayers[i]=new FFLayer(layerSizes[i],potentials[i],weights[i],sums[i+1],inputs[i],lambda);
}
}
return *ffLayers[l];
}
@@ -6,43 +6,98 @@
#include "Neuron"
#include "Network"

#include <cstdarg>
#include <vector>
#include <initializer_list>
#include <thread>

#include <iostream>
#include <math.h>

#include <mmintrin.h>
#include <xmmintrin.h>
#include <emmintrin.h>
#include <xmmintrin.h>
#include <pmmintrin.h>

#include "../sse_mathfun.h"

namespace Shin
{
namespace NeuronNetwork
{

// template <typename _NT>
class FeedForwardNetwork : public ACyclicNetwork
class FFNeuron : public Neuron
{
public:
FeedForwardNetwork(const FeedForwardNetwork &f):first(nullptr),last(nullptr),layers()
{
for(Layer *l:f.layers)
{
layers.push_back(new Layer(*l));
last=layers[layers.size()-1];
}
first=layers[0];
}
FeedForwardNetwork operator=(const FeedForwardNetwork &f)=delete;
template<typename... Args>inline FeedForwardNetwork(std::initializer_list<int> s):first(nullptr),last(nullptr),layers() { for(const int i:s) {addLayer(i);}}
//inline FeedForwardNetwork(std::vector<int> q);
~FeedForwardNetwork();

virtual Solution solve(const Problem& p) override;
unsigned size() {return layers.size();}
const Layer* operator[](int layer);
FFNeuron() = delete;
FFNeuron(const FFNeuron&) = delete;
FFNeuron& operator=(const FFNeuron&) = delete;

FFNeuron(float &pot, float *w, float &s, float &i,float lam):potential(pot),weights(w),sum(s),inputs(i),lambda(lam) { }

inline virtual float getPotential() const override {return potential;}
inline virtual void setPotential(float p) { potential=p;}

inline virtual float getWeight(size_t i ) const override { return weights[i];}
inline virtual void setWeight(size_t i,float p) override { weights[i]=p; }

inline virtual float output() const { return sum; }
inline virtual float input() const { return inputs; }
inline virtual float derivatedOutput() const { return lambda*output()*(1.0-output()); }
protected:
void addLayer(int neurons);
float &potential;
float *weights;
float &sum;
float &inputs;
float lambda;
private:
};
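The identity behind derivatedOutput() above: for y = 1/(1+exp(-lambda*x)), dy/dx = lambda*y*(1-y), so the derivative can be computed from the cached output alone. A quick numeric cross-check (my sketch, not code from this commit):

#include <cmath>
#include <cstdio>

int main()
{
    const double lambda = 0.8, x = 0.3, h = 1e-6;
    auto f = [&](double t) { return 1.0 / (1.0 + std::exp(-lambda * t)); };
    double y = f(x);
    double analytic = lambda * y * (1.0 - y);
    double numeric = (f(x + h) - f(x - h)) / (2.0 * h);
    std::printf("analytic=%.8f numeric=%.8f\n", analytic, numeric); // agree to ~1e-9
    return 0;
}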
class FFLayer: public Layer
{
public:
FFLayer(size_t s, float *p,float **w,float *su,float *in,float lam): neurons(nullptr),layerSize(s),potentials(p),weights(w),sums(su),inputs(in),lambda(lam) {}
~FFLayer();

FFLayer(const FFLayer &) = delete;
FFLayer& operator=(const FFLayer &) = delete;

// inline virtual Neuron& operator[](size_t layer) override {return operator[](layer);};
virtual FFNeuron& operator[](size_t layer) override;
inline virtual size_t size() const override {return layerSize;};
protected:
FFNeuron **neurons;
size_t layerSize;
float *potentials;
float **weights;
float *sums;
float *inputs;
float lambda;
};

class FeedForward:public ACyclicNetwork
{
public:
FeedForward(std::initializer_list<int> s, double lam=Shin::NeuronNetwork::lambda);
FeedForward(const FeedForward &f) = delete; //TODO
FeedForward operator=(const FeedForward &f)=delete;
virtual ~FeedForward();
virtual Solution solve(const Problem& p) override;
virtual size_t size() const override { return layers;};
virtual FFLayer& operator[](size_t l) override;
void setThreads(unsigned t) {threads=t;}
protected:
void solvePart(float *newSolution, size_t begin, size_t end,size_t prevSize, float* sol,size_t layer);
private:
Layer* first;
Layer* last ;
std::vector<Layer*> layers;
FFLayer **ffLayers=nullptr;
float ***weights=nullptr;
float **potentials=nullptr;
public:
float **sums=nullptr;
float **inputs=nullptr;
private:
size_t *layerSizes=nullptr;
size_t layers;
unsigned threads=1;
};

}
@@ -1 +0,0 @@
./FeedForwardQuick.h
@@ -1,183 +0,0 @@
#include "FeedForwardQuick"
#include <pmmintrin.h>

using namespace Shin::NeuronNetwork;

FFLayer::~FFLayer()
{
if(neurons!=nullptr)
{
for(size_t i=0;i<layerSize;i++)
{
delete neurons[i];
}
delete[] neurons;
}
}

FFNeuron* FFLayer::operator[](int neuron)
{
if(neurons==nullptr)
{
neurons=new FFNeuron*[layerSize];
for(size_t i=0;i<layerSize;i++)
{
neurons[i]=new FFNeuron(potentials[i],weights[i],sums[i],inputs[i],lambda);
}
}
return neurons[neuron];
}

FeedForwardNetworkQuick::~FeedForwardNetworkQuick()
{
if(weights != nullptr)
{
for(size_t i=0;i<layers;i++)
{
for (size_t j=1;j<layerSizes[i];j++)
{
delete[] weights[i][j];
}
delete[] weights[i];
delete[] potentials[i];
delete[] sums[i];
delete[] inputs[i];
}
delete[] sums[layers];
delete[] weights;
delete[] potentials;
delete[] layerSizes;
delete[] sums;
delete[] inputs;
}
if(ffLayers !=nullptr)
{
for(size_t i=0;i<layers;i++)
{
delete ffLayers[i];
}
delete[] ffLayers;
}
}

void FeedForwardNetworkQuick::solvePart(float *newSolution, register size_t begin, size_t end,size_t prevSize, float *sol,size_t layer)
{
if(prevSize >4)
{
__m128 partialSolution;
__m128 w;
__m128 sols;
__m128 temporaryConst1=_mm_set1_ps(1.0);
__m128 temporaryConstLambda=_mm_set1_ps(-lambda);
register size_t alignedPrev=prevSize>16?(prevSize-(prevSize%16)):0;
for( size_t j=begin;j<end;j++)
{
partialSolution= _mm_setzero_ps();
w=_mm_setzero_ps();
for(register size_t k=alignedPrev;k<prevSize;k++)
{
w = _mm_load_ss(this->weights[layer][j]+k);
sols = _mm_load_ss(sol+k);
w=_mm_mul_ps(w,sols);
partialSolution=_mm_add_ps(partialSolution,w);
// w=_mm_shuffle_ps(w,w,3*2^0+0*2^2+1*2^4+2*2^6);
// sols=_mm_shuffle_ps(sols,sols,3*2^0+0*2^2+1*2^4+2*2^6);
}
for(register size_t k=0;k<alignedPrev;k+=4)
{
w = _mm_load_ps(this->weights[layer][j]+k);
//_mm_prefetch((char*)this->weights[layer][j]+k+4,_MM_HINT_T0);
sols = _mm_load_ps(sol+k);
w=_mm_mul_ps(w,sols);
partialSolution=_mm_add_ps(partialSolution,w);
}
/* pre-SSE3 solution
__m128 temp = _mm_add_ps(_mm_movehl_ps(foo128, foo128), foo128);
float x;
_mm_store_ss(&x, _mm_add_ss(temp, _mm_shuffle_ps(temp, 1)));
*/
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
_mm_store_ss(inputs[layer]+j,partialSolution);
partialSolution=_mm_mul_ps(temporaryConstLambda,partialSolution); //-lambda*sol[k]
partialSolution=exp_ps(partialSolution); //exp(sols)
partialSolution= _mm_add_ps(partialSolution,temporaryConst1); //1+exp()
partialSolution= _mm_div_ps(temporaryConst1,partialSolution);//1/....*/
_mm_store_ss(newSolution+j,partialSolution);
}
}else
{
for( size_t j=begin;j<end;j++)
{
register float tmp=0;
for(register size_t k=0;k<prevSize;k++)
{
tmp+=sol[k]*weights[layer][j][k];
}
newSolution[j]=(1.0/(1.0+exp(-lambda*tmp)));
inputs[layer][j]=tmp;
}
}
}

Solution FeedForwardNetworkQuick::solve(const Problem& p)
{
register float* sol=sums[1];

sums[0][0]=1;
sol[0]=1;
for(size_t i=0;i<p.size();i++)
{
sums[0][i+1]=p[i];
sol[i+1]=p[i];
}

register size_t prevSize=layerSizes[0];
for(register size_t i=1;i<layers;i++)
{
float* newSolution= sums[i+1];
if(threads > 1 && (layerSizes[i] > 700 ||prevSize > 700)) // 700 is an guess about actual size, when creating thread has some speedup
{
std::vector<std::thread> th;
size_t s=1;
size_t step =layerSizes[i]/threads;
for(size_t t=1;t<=threads;t++)
{
//TODO do i need it to check?
if(s>=layerSizes[i])
break;
th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{
solvePart(newSolution,from,to,prevSize,sol,i);
},s,t==threads?layerSizes[i]:s+step));//{}
s+=step;
}

for (auto& thr : th)
thr.join();
}else
{
solvePart(newSolution,1,layerSizes[i],prevSize,sol,i);
}
prevSize=layerSizes[i];
sol=newSolution;
}
Solution ret;
for(size_t i=1;i<prevSize;i++)
{
ret.push_back(sol[i]);
}
return ret;
}

FFLayer* FeedForwardNetworkQuick::operator[](int l)
{
if(ffLayers==nullptr)
{
ffLayers=new FFLayer*[layers];
for(size_t i=0;i<layers;i++)
{
ffLayers[i]=new FFLayer(layerSizes[i],potentials[i],weights[i],sums[i+1],inputs[i],lambda);
}
}
return ffLayers[l];
}
@@ -1,138 +0,0 @@
#ifndef _S_NN_FFQ_H_
#define _S_NN_FFQ_H_

#include "Problem"
#include "Solution"
#include "Neuron"
#include "FeedForward"

#include <vector>
#include <initializer_list>
#include <thread>

#include <iostream>
#include <math.h>

#include <mmintrin.h>
#include <xmmintrin.h>
#include <emmintrin.h>
#include <xmmintrin.h>

#include "../sse_mathfun.h"

#define LAMBDA 0.8

namespace Shin
{
namespace NeuronNetwork
{
class FFNeuron : public Neuron
{
public:
FFNeuron() = delete;
FFNeuron(const FFNeuron&) = delete;
FFNeuron& operator=(const FFNeuron&) = delete;
FFNeuron(float &pot, float *w, float &s, float &i,float lam):potential(pot),weights(w),sum(s),inputs(i),lambda(lam) { }

float getPotential() {return potential;}
void setPotential(float p) { potential=p;}
float getWeight(unsigned int i ) { return weights[i];}
void setWeight(unsigned int i,float p) { weights[i]=p; }
inline float output() const { return sum; }
inline float input() const { return inputs; }
inline float derivatedOutput() const { return lambda*output()*(1.0-output()); }
protected:
float &potential;
float *weights;
float &sum;
float &inputs;
float lambda;
private:
};

class FFLayer//: public Layer
{
public:
FFLayer(const FFLayer &) =delete;
FFLayer operator=(const FFLayer &) = delete;
FFLayer(size_t s, float *p,float **w,float *su,float *in,float lam): neurons(nullptr),layerSize(s),potentials(p),weights(w),sums(su),inputs(in),lambda(lam) {}
~FFLayer();
FFNeuron* operator[](int neuron);
size_t size() const {return layerSize;};
protected:
FFNeuron **neurons;
size_t layerSize;
float *potentials;
float **weights;
float *sums;
float *inputs;
float lambda;
};

class FeedForwardNetworkQuick:public ACyclicNetwork
{
public:
FeedForwardNetworkQuick(const FeedForwardNetworkQuick &f) = delete; //TODO
FeedForwardNetworkQuick operator=(const FeedForwardNetworkQuick &f)=delete;
template<typename... Args>inline FeedForwardNetworkQuick(std::initializer_list<int> s, double lam=LAMBDA):ffLayers(nullptr),weights(nullptr),potentials(nullptr),sums(nullptr),inputs(nullptr),layerSizes(nullptr),layers(s.size()),lambda(lam)
{
weights= new float**[s.size()];
potentials= new float*[s.size()];
layerSizes= new size_t[s.size()];
sums= new float*[s.size()+1];
inputs= new float*[s.size()];
int i=0;
int prev_size=1;
for(int layeSize:s) // TODO rename
{
layeSize+=1;
if(i==0)
{
prev_size=layeSize;
sums[0]= new float[layeSize];
sums[0][0]=1.0;
}
layerSizes[i]=layeSize;
weights[i]= new float*[layeSize];
potentials[i]= new float[layeSize];
sums[i+1]= new float[layeSize];
inputs[i]= new float[layeSize];
potentials[i][0]=1.0;
sums[i+1][0]=1.0;
for (int j=1;j<layeSize;j++)
{
potentials[i][j]=1.0;
weights[i][j]= new float[prev_size];
for(int k=0;k<prev_size;k++)
{
weights[i][j][k]=1.0-((float)(rand()%2001))/1000.0;
}
}
i++;
prev_size=layeSize;
}
}
virtual ~FeedForwardNetworkQuick();
virtual Solution solve(const Problem& p) override;
unsigned size() { return layers;}
FFLayer* operator[](int l);
void setThreads(unsigned t) {threads=t;}
protected:
void solvePart(float *newSolution, size_t begin, size_t end,size_t prevSize, float* sol,size_t layer);
private:
FFLayer **ffLayers;
float ***weights;
float **potentials;
public:
float **sums;
float **inputs;
private:
size_t *layerSizes;
size_t layers;
float lambda;
unsigned threads=1;
};

}
}
#endif
@@ -1,7 +1,7 @@
#include "./BackPropagation"
#include <thread>

Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForwardNetworkQuick &n): Supervised(n)
Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForward &n): Supervised(n)
{

}
@@ -24,15 +24,15 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro
deltas=new float*[network.size()];
for(size_t i=0;i<network.size();i++)
{
deltas[i]=new float[network[i]->size()];
deltas[i]=new float[network[i].size()];
deltas[i][0]=0.0;
}
}

for(size_t j=1;j<network[network.size()-1]->size();j++)
for(size_t j=1;j<network[network.size()-1].size();j++)
{
deltas[network.size()-1][j]= correction(expectation[j-1],network[network.size()-1]->operator[](j)->output())
*network[network.size()-1]->operator[](j)->derivatedOutput();
deltas[network.size()-1][j]= correction(expectation[j-1],network[network.size()-1][j].output())
*network[network.size()-1][j].derivatedOutput();
}

for(int i=(int)network.size()-2;i>0;i--)
@@ -43,51 +43,50 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro
size_t s=0;
//TODO THIS IS NOT WORKING!!!
#define THREADS 4
int step =network[i]->size()/THREADS;
int step =network[i].size()/THREADS;
for(int t=1;t<=THREADS;t++)
{
if(s>=network[i]->size())
if(s>=network[i].size())
break;
th.push_back(std::thread([&i,this](size_t from, size_t to)->void{
for(size_t j=from;j<to;j++)
{
register float deltasWeight = 0;
for(size_t k=1;k<this->network[i+1]->size();k++)
for(size_t k=1;k<this->network[i+1].size();k++)
{
deltasWeight+=deltas[i+1][k]*this->network[i+1]->operator[](k)->getWeight(j);
deltasWeight+=deltas[i+1][k]*this->network[i+1][k].getWeight(j);
}
//deltas[i][j]*=this->network[i]->operator[](j)->derivatedOutput(); // WHY THE HELL IS SEQ here??
}
},s,t==THREADS?network[i]->size():s+step));//{}
},s,t==THREADS?network[i].size():s+step));//{}
s+=step;
}
for (auto& thr : th)
thr.join();
}else
{
for(size_t j=0;j<network[i]->size();j++)
for(size_t j=0;j<network[i].size();j++)
{
register float deltasWeight = 0;
for(size_t k=1;k<this->network[i+1]->size();k++)
for(size_t k=1;k<this->network[i+1].size();k++)
{
deltasWeight+=deltas[i+1][k]*this->network[i+1]->operator[](k)->getWeight(j);
deltasWeight+=deltas[i+1][k]*this->network[i+1][k].getWeight(j);
}
deltas[i][j]=deltasWeight*this->network[i]->operator[](j)->derivatedOutput();
deltas[i][j]=deltasWeight*this->network[i][j].derivatedOutput();
}
}
}

for(size_t i=1;i<network.size();i++)
{
size_t max=network[i-1]->size();
size_t max=network[i-1].size();

for(size_t j=1;j<network[i]->size();j++)
for(size_t j=1;j<network[i].size();j++)
{
network[i]->operator[](j)->setWeight(0,network[i]->operator[](j)->getWeight(0)+deltas[i][j]*learningCoeficient);
network[i][j].setWeight(0,network[i][j].getWeight(0)+deltas[i][j]*learningCoeficient);
for(size_t k=1;k<max;k++)
{
network[i]->operator[](j)->setWeight(k,
network[i]->operator[](j)->getWeight(k)+learningCoeficient* deltas[i][j]*network[i-1]->operator[](k)->output());
network[i][j].setWeight(k, network[i][j].getWeight(k)+learningCoeficient*deltas[i][j]*network[i-1][k].output());
}
}
}
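The update implemented above is standard gradient-descent backpropagation on logistic units: output-layer deltas come from correction() times derivatedOutput(), hidden deltas fold back through the next layer's weights, and every weight moves by learningCoeficient * delta * input, with index 0 reserved for the bias. A one-neuron scalar illustration (my sketch, not the library's API):

#include <cmath>
#include <cstdio>

int main()
{
    const float lambda = 0.8f, eta = 0.5f;      // slope and learning coefficient
    float w[3] = {0.1f, 0.4f, -0.2f};           // w[0] is the bias weight
    const float x[3] = {1.0f, 0.7f, 0.3f};      // x[0] = 1 feeds the bias
    const float target = 1.0f;

    float sum = w[0] * x[0] + w[1] * x[1] + w[2] * x[2];
    float y = 1.0f / (1.0f + std::exp(-lambda * sum));

    float delta = (target - y) * lambda * y * (1.0f - y);
    for (int k = 0; k < 3; k++)
        w[k] += eta * delta * x[k];             // same rule as the setWeight(...) calls above

    std::printf("y=%f delta=%f w={%f, %f, %f}\n", y, delta, w[0], w[1], w[2]);
    return 0;
}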
@@ -112,8 +111,6 @@ float Shin::NeuronNetwork::Learning::BackPropagation::teach(const Shin::NeuronNe
propagate(solution);
}


// std::cerr << "error: " << error << "\n";
return error;
}
@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Solution.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"
#include "Supervised"

/*
@@ -29,7 +29,7 @@ namespace Learning
class BackPropagation : public Supervised
{
public:
BackPropagation(FeedForwardNetworkQuick &n);
BackPropagation(FeedForward &n);
virtual ~BackPropagation();

BackPropagation(const Shin::NeuronNetwork::Learning::BackPropagation&) =delete;
@@ -1,6 +1,6 @@
#include "./OpticalBackPropagation"

Shin::NeuronNetwork::Learning::OpticalBackPropagation::OpticalBackPropagation(FeedForwardNetworkQuick &n): BackPropagation(n)
Shin::NeuronNetwork::Learning::OpticalBackPropagation::OpticalBackPropagation(FeedForward &n): BackPropagation(n)
{
setEntropySize(100);
}
@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Solution.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"
#include "BackPropagation"

/*
@@ -23,7 +23,7 @@ namespace Learning
class OpticalBackPropagation : public BackPropagation
{
public:
OpticalBackPropagation(FeedForwardNetworkQuick &n);
OpticalBackPropagation(FeedForward &n);
protected:
virtual float correction(float expected, float computed) override;
};
@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Problem.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"
#include "BackPropagation"
#include "Unsupervised"
#include "RL/QFunction.h"
@@ -75,7 +75,7 @@ void Shin::NeuronNetwork::RL::QFunctionNetwork::initialiseNetwork(size_t input,
{
if(function == nullptr)
{
function = new FeedForwardNetworkQuick({(int)input,(int)size,(int)choices});
function = new FeedForward({(int)input,(int)size,(int)choices});
b= new Learning::BackPropagation(*function);
}
}
@@ -107,7 +107,7 @@ void Shin::NeuronNetwork::RL::QFunctionNetwork::learnDelayed(std::vector< std::p
solSize=function->solve(p[0].first).size();
if (!solSize)
return;
for(int i=0;i<p.size();i++)
for(size_t i=0;i<p.size();i++)
{
Solution s;
for(int j=0;j<solSize;j++)
@@ -143,7 +143,7 @@ void Shin::NeuronNetwork::RL::QFunctionNetwork::learn(Shin::NeuronNetwork::Solut
}
}

void Shin::NeuronNetwork::RL::QFunctionNetwork::learn(Shin::NeuronNetwork::Problem& p, int choice, float quality)
void Shin::NeuronNetwork::RL::QFunctionNetwork::learn(Shin::NeuronNetwork::Problem& p, int, float quality)
{
if(quality>0)
{
@@ -2,7 +2,7 @@
#define _Q_FUNCTION_H_

#include "../../Solution"
#include "../../FeedForwardQuick"
#include "../../FeedForward"
#include "../BackPropagation.h"
#include "../OpticalBackPropagation.h"
#include <map>
@@ -85,7 +85,7 @@ namespace RL
void opticalBackPropagation() {delete b; b=new Learning::OpticalBackPropagation(*function);};
private:
Learning::BackPropagation *b;
FeedForwardNetworkQuick * function;
FeedForward * function;
float learningA=0.05;
float learningB=0.008;
};
@@ -1,6 +1,6 @@
#include "./Reinforcement"

Shin::NeuronNetwork::Learning::Reinforcement::Reinforcement(Shin::NeuronNetwork::FeedForwardNetworkQuick& n): Unsupervised(n), p(new BackPropagation(n))
Shin::NeuronNetwork::Learning::Reinforcement::Reinforcement(Shin::NeuronNetwork::FeedForward& n): Unsupervised(n), p(new BackPropagation(n))
{
p->setLearningCoeficient(1);
}
@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Problem.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"
#include "BackPropagation"
#include "Unsupervised"
#include "functional"
@@ -40,7 +40,7 @@ namespace Learning
class Reinforcement : public Unsupervised
{
public:
Reinforcement(FeedForwardNetworkQuick &n);
Reinforcement(FeedForward &n);
~Reinforcement();
Reinforcement(const Reinforcement&) =delete;
Reinforcement& operator=(const Reinforcement&) =delete;
@@ -1,5 +1,5 @@
#include "./Supervised"
Shin::NeuronNetwork::Learning::Supervised::Supervised(Shin::NeuronNetwork::FeedForwardNetworkQuick& n) :network(n)
Shin::NeuronNetwork::Learning::Supervised::Supervised(Shin::NeuronNetwork::FeedForward& n) :network(n)
{

}
@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Solution.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"

namespace Shin
{
@@ -17,7 +17,7 @@ namespace Learning
{
public:
Supervised() =delete;
Supervised(FeedForwardNetworkQuick &n);
Supervised(FeedForward &n);
virtual ~Supervised() {};
float calculateError(const Solution &expectation,const Solution &solution);
virtual float teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution)=0;
@@ -25,7 +25,7 @@ namespace Learning
void debugOn();
void debugOff();
protected:
FeedForwardNetworkQuick &network;
FeedForward &network;
bool debug=0;
};
}
@@ -1,6 +1,6 @@
#include "./Unsupervised"

Shin::NeuronNetwork::Learning::Unsupervised::Unsupervised(Shin::NeuronNetwork::FeedForwardNetworkQuick& n) :network(n)
Shin::NeuronNetwork::Learning::Unsupervised::Unsupervised(Shin::NeuronNetwork::FeedForward& n) :network(n)
{

}
@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Solution.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"

namespace Shin
{
@@ -17,12 +17,12 @@ namespace Learning
{
public:
Unsupervised() =delete;
Unsupervised(FeedForwardNetworkQuick &n);
Unsupervised(FeedForward &n);
virtual ~Unsupervised() {};
void debugOn();
void debugOff();
protected:
FeedForwardNetworkQuick &network;
FeedForward &network;
bool debug=0;
};
}
@@ -1,4 +1,5 @@
OBJFILES= Neuron.o Network.o FeedForward.o FeedForwardQuick.o\
OBJFILES=\
FeedForward.o\
Learning/Supervised.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o\
Learning/Unsupervised.o Learning/Reinforcement.o Learning/RL/QFunction.o Learning/QLearning.o\
Solution.o Problem.o ./IO.o
@@ -16,7 +17,7 @@ lib: $(LIBNAME).so $(LIBNAME).a
$(LIBNAME).so: $(OBJFILES)
$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so

$(LIBNAME).a: $(OBJFILES)
$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h
rm -f $(LIBNAME).a # create new library
ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
ranlib $(LIBNAME).a
@@ -1,36 +0,0 @@
#include "Network"

using namespace Shin::NeuronNetwork;

Layer::Layer(int a):neurons()
{
while(a--)
{
neurons.push_back(new Neuron());
}
}

Layer::~Layer()
{
for(Neuron *n:neurons)
{
delete n;
}
}


Solution Layer::solve(const std::vector<float> &input)
{
Solution ret;
for(Neuron *n:neurons)
{
ret.push_back(n->output(input));
}
return ret;
}

Neuron* Layer::operator[](int neuron) const
{
return neurons[neuron];
}
@@ -10,46 +10,44 @@
#include <initializer_list>

#include <iostream>

namespace Shin
{
namespace NeuronNetwork
{
const float lambda=0.8;
class Layer
{
public:
virtual ~Layer() {};
virtual Neuron& operator[](size_t neuron)=0;
virtual size_t size() const=0;
};

class Network
{
public:
inline Network(double lam):lambda(lam) {};
virtual ~Network() {};

virtual Solution solve(const Problem&)=0;
virtual Layer& operator[](size_t layer)=0;
inline float getLambda() const {return lambda;}

protected:
float lambda;
private:
};

class ACyclicNetwork : public Network
{
public:
inline ACyclicNetwork(double lam):Network(lam) {};
virtual size_t size() const=0;
protected:
private:
};

class Layer
{
public:
Layer(int a);
Layer(const Layer &l):neurons()
{
for(unsigned i=0;i<l.neurons.size();i++)
{
neurons.push_back(new Neuron(*l.neurons[i]));
}
}
~Layer();
Solution solve(const std::vector<float> &input);
Neuron* operator[](int neuron) const;
int size() const {return neurons.size();};
protected:
std::vector<Neuron*> neurons;
};


}
}
#endif
@@ -1,51 +0,0 @@
#include "./Neuron"

using namespace Shin::NeuronNetwork;

Neuron::Neuron(): potential(1),weights()
{

}
float Neuron::getPotential() const
{
return potential;
}

void Neuron::setPotential(float p)
{
potential=p;
}

float Neuron::getWeight(unsigned int i) const
{
if(i >= weights.size())
{
return 1.0;
}
return weights[0];
}

void Neuron::setWeight(unsigned int i,float p)
{
if(i >= weights.size())
{
// std::cout << "resize to " << i;
weights.resize(i+1);
}
// std::cerr << "Set " << i << " to " << p << "\n";
weights[i]=p;
}

float Neuron::output(std::vector<float> input)
{
register float sum=0;
for(unsigned int i=0;i<input.size();i++)
{
// std::cerr << "W: " << getWeight(i) <<"\n";
sum+=getWeight(i)*input[i];
}
return 1.0/(1.0+exp(-0.5*sum));
if(sum <= getPotential())
return 0;
return 1;
}
@@ -1,8 +1,7 @@
#ifndef _S_NN_NEURON_H_
#define _S_NN_NEURON_H_

#include <vector>
#include <math.h>
#include <cstdarg>

namespace Shin
{
@@ -11,26 +10,18 @@ namespace NeuronNetwork
class Neuron
{
public:
Neuron();
Neuron(const Neuron &n):potential(n.potential),weights(n.weights)
{

}
float getPotential() const;
void setPotential(float p);
float getWeight(unsigned int) const;
void setWeight(unsigned int i,float p);
float output(const std::vector<float>);
float output() { return lastOutput;}
Neuron() {};
virtual ~Neuron() {};
virtual float getPotential() const =0;
virtual void setPotential(float p) =0;

virtual float getWeight(size_t) const =0;
virtual void setWeight(size_t i,float p) =0;

virtual float output() const =0;
virtual float input() const=0;
virtual float derivatedOutput() const=0;
protected:
double potential;
private:
std::vector<float> weights;
float lastOutput=0.0;
float lastInput=0.0;
};
class SimpleNeuron: public Neuron
{
};
}
}