initial cleaning
@@ -2,31 +2,219 @@

using namespace Shin::NeuronNetwork;

FeedForwardNetwork::~FeedForwardNetwork()
FFLayer::~FFLayer()
{
for(Layer *l:layers)
if(neurons!=nullptr)
{
delete l;
for(size_t i=0;i<layerSize;i++)
{
delete neurons[i];
}
delete[] neurons;
}
}

Solution FeedForwardNetwork::solve(const Problem& p)
FFNeuron& FFLayer::operator[](size_t neuron)
{
Solution s=Solution(p);
for (Layer *l:layers)
if(neurons==nullptr)
{
s=l->solve(s);
neurons=new FFNeuron*[layerSize];
for(size_t i=0;i<layerSize;i++)
{
neurons[i]=new FFNeuron(potentials[i],weights[i],sums[i],inputs[i],lambda);
}
}
return s;
return *neurons[neuron];
}

const Layer* FeedForwardNetwork::operator[](int layer)
FeedForward::FeedForward(std::initializer_list< int > s, double lam): ACyclicNetwork(lam),layers(s.size())
{
return layers[layer];
weights= new float**[s.size()];
potentials= new float*[s.size()];
layerSizes= new size_t[s.size()];
sums= new float*[s.size()+1];
inputs= new float*[s.size()];
int i=0;
int prev_size=1;
for(int layeSize:s) // TODO rename
{
layeSize+=1;
if(i==0)
{
prev_size=layeSize;
sums[0]= new float[layeSize];
sums[0][0]=1.0;
}
layerSizes[i]=layeSize;
weights[i]= new float*[layeSize];
potentials[i]= new float[layeSize];
sums[i+1]= new float[layeSize];
inputs[i]= new float[layeSize];
potentials[i][0]=1.0;
sums[i+1][0]=1.0;
for (int j=1;j<layeSize;j++)
{
potentials[i][j]=1.0;
weights[i][j]= new float[prev_size];
for(int k=0;k<prev_size;k++)
{
weights[i][j][k]=1.0-((float)(rand()%2001))/1000.0;
}
}
i++;
prev_size=layeSize;
}
}
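Note (sketch, not part of the commit): the initialiser above draws rand()%2001 from {0,...,2000}, so 1.0-((float)(rand()%2001))/1000.0 is a uniform weight on [-1.0, 1.0]. A self-contained modern-C++ equivalent, assuming <random> would be acceptable here (the function name is hypothetical):

#include <random>

// Returns a weight uniformly distributed on [-1.0, 1.0], matching the
// range produced by the rand()-based expression in the constructor above.
float randomWeight()
{
    static std::mt19937 gen{std::random_device{}()};
    static std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
    return dist(gen);
}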

void FeedForwardNetwork::addLayer(int neurons)
FeedForward::~FeedForward()
{
layers.push_back(new Layer(neurons));
if(weights != nullptr)
{
for(size_t i=0;i<layers;i++)
{
for (size_t j=1;j<layerSizes[i];j++)
{
delete[] weights[i][j];
}
delete[] weights[i];
delete[] potentials[i];
delete[] sums[i];
delete[] inputs[i];
}
delete[] sums[layers];
delete[] weights;
delete[] potentials;
delete[] layerSizes;
delete[] sums;
delete[] inputs;
}
if(ffLayers !=nullptr)
{
for(size_t i=0;i<layers;i++)
{
delete ffLayers[i];
}
delete[] ffLayers;
}
}

void FeedForward::solvePart(float *newSolution, register size_t begin, size_t end,size_t prevSize, float *sol,size_t layer)
{
if(prevSize >4)
{
__m128 partialSolution;
__m128 w;
__m128 sols;
__m128 temporaryConst1=_mm_set1_ps(1.0);
__m128 temporaryConstLambda=_mm_set1_ps(-lambda);
register size_t alignedPrev=prevSize>16?(prevSize-(prevSize%16)):0;
for( size_t j=begin;j<end;j++)
{
partialSolution= _mm_setzero_ps();
w=_mm_setzero_ps();
for(register size_t k=alignedPrev;k<prevSize;k++)
{
w = _mm_load_ss(this->weights[layer][j]+k);
sols = _mm_load_ss(sol+k);
w=_mm_mul_ps(w,sols);
partialSolution=_mm_add_ps(partialSolution,w);
// w=_mm_shuffle_ps(w,w,3*2^0+0*2^2+1*2^4+2*2^6);
// sols=_mm_shuffle_ps(sols,sols,3*2^0+0*2^2+1*2^4+2*2^6);
}
for(register size_t k=0;k<alignedPrev;k+=4)
{
w = _mm_load_ps(this->weights[layer][j]+k);
//_mm_prefetch((char*)this->weights[layer][j]+k+4,_MM_HINT_T0);
sols = _mm_load_ps(sol+k);
w=_mm_mul_ps(w,sols);
partialSolution=_mm_add_ps(partialSolution,w);
}
/* pre-SSE3 solution
__m128 temp = _mm_add_ps(_mm_movehl_ps(foo128, foo128), foo128);
float x;
_mm_store_ss(&x, _mm_add_ss(temp, _mm_shuffle_ps(temp, 1)));
*/
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
_mm_store_ss(inputs[layer]+j,partialSolution);
partialSolution=_mm_mul_ps(temporaryConstLambda,partialSolution); //-lambda*sol[k]
partialSolution=exp_ps(partialSolution); //exp(sols)
partialSolution= _mm_add_ps(partialSolution,temporaryConst1); //1+exp()
partialSolution= _mm_div_ps(temporaryConst1,partialSolution);//1/....*/
_mm_store_ss(newSolution+j,partialSolution);
}
}else
{
for( size_t j=begin;j<end;j++)
{
register float tmp=0;
for(register size_t k=0;k<prevSize;k++)
{
tmp+=sol[k]*weights[layer][j][k];
}
newSolution[j]=(1.0/(1.0+exp(-lambda*tmp)));
inputs[layer][j]=tmp;
}
}
}
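Note (illustrative sketch, not part of the commit): the two _mm_hadd_ps calls above collapse the four partial sums in the register into a single total in every lane, which _mm_store_ss then writes out before the sigmoid 1/(1+exp(-lambda*x)) is applied via exp_ps. A minimal standalone demonstration of that reduction (compile with -msse3):

#include <pmmintrin.h> // SSE3: _mm_hadd_ps
#include <cstdio>

int main()
{
    __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); // lanes {1,2,3,4}
    v = _mm_hadd_ps(v, v); // lanes {1+2, 3+4, 1+2, 3+4}
    v = _mm_hadd_ps(v, v); // every lane now holds 1+2+3+4
    float total;
    _mm_store_ss(&total, v);
    std::printf("%f\n", total); // prints 10.000000
}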

Solution FeedForward::solve(const Problem& p)
{
register float* sol=sums[1];

sums[0][0]=1;
sol[0]=1;
for(size_t i=0;i<p.size();i++)
{
sums[0][i+1]=p[i];
sol[i+1]=p[i];
}

register size_t prevSize=layerSizes[0];
for(register size_t i=1;i<layers;i++)
{
float* newSolution= sums[i+1];
if(threads > 1 && (layerSizes[i] > 700 ||prevSize > 700)) // 700 is a guess at the size where creating threads starts to pay off
{
std::vector<std::thread> th;
size_t s=1;
size_t step =layerSizes[i]/threads;
for(size_t t=1;t<=threads;t++)
{
//TODO do i need it to check?
if(s>=layerSizes[i])
break;
th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{
solvePart(newSolution,from,to,prevSize,sol,i);
},s,t==threads?layerSizes[i]:s+step));//{}
s+=step;
}

for (auto& thr : th)
thr.join();
}else
{
solvePart(newSolution,1,layerSizes[i],prevSize,sol,i);
}
prevSize=layerSizes[i];
sol=newSolution;
}
Solution ret;
for(size_t i=1;i<prevSize;i++)
{
ret.push_back(sol[i]);
}
return ret;
}
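Note (sketch, not part of the commit): the threaded branch above splits rows [1, layerSizes[i]) into chunks of layerSizes[i]/threads and lets the last thread run to the end, absorbing the integer-division remainder. A standalone illustration of that partitioning with hypothetical sizes:

#include <cstddef>
#include <cstdio>

int main()
{
    const std::size_t rows = 10, threads = 3; // hypothetical sizes
    const std::size_t step = rows / threads;  // = 3
    std::size_t s = 1;                        // row 0 is the bias row
    for (std::size_t t = 1; t <= threads; t++) {
        if (s >= rows) break;
        std::size_t to = (t == threads) ? rows : s + step;
        std::printf("thread %zu: rows [%zu, %zu)\n", t, s, to);
        s += step;
    }
    // prints: thread 1: rows [1, 4)  thread 2: rows [4, 7)  thread 3: rows [7, 10)
}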

FFLayer& FeedForward::operator[](size_t l)
{
if(ffLayers==nullptr)
{
ffLayers=new FFLayer*[layers];
for(size_t i=0;i<layers;i++)
{
ffLayers[i]=new FFLayer(layerSizes[i],potentials[i],weights[i],sums[i+1],inputs[i],lambda);
}
}
return *ffLayers[l];
}

@@ -6,43 +6,98 @@
#include "Neuron"
#include "Network"

#include <cstdarg>
#include <vector>
#include <initializer_list>
#include <thread>

#include <iostream>
#include <math.h>

#include <mmintrin.h>
#include <xmmintrin.h>
#include <emmintrin.h>
#include <xmmintrin.h>
#include <pmmintrin.h>

#include "../sse_mathfun.h"

namespace Shin
{
namespace NeuronNetwork
{

// template <typename _NT>
class FeedForwardNetwork : public ACyclicNetwork
class FFNeuron : public Neuron
{
public:
FeedForwardNetwork(const FeedForwardNetwork &f):first(nullptr),last(nullptr),layers()
{
for(Layer *l:f.layers)
{
layers.push_back(new Layer(*l));
last=layers[layers.size()-1];
}
first=layers[0];
}
FeedForwardNetwork operator=(const FeedForwardNetwork &f)=delete;
template<typename... Args>inline FeedForwardNetwork(std::initializer_list<int> s):first(nullptr),last(nullptr),layers() { for(const int i:s) {addLayer(i);}}
//inline FeedForwardNetwork(std::vector<int> q);
~FeedForwardNetwork();

virtual Solution solve(const Problem& p) override;
unsigned size() {return layers.size();}
const Layer* operator[](int layer);
FFNeuron() = delete;
FFNeuron(const FFNeuron&) = delete;
FFNeuron& operator=(const FFNeuron&) = delete;

FFNeuron(float &pot, float *w, float &s, float &i,float lam):potential(pot),weights(w),sum(s),inputs(i),lambda(lam) { }

inline virtual float getPotential() const override {return potential;}
inline virtual void setPotential(float p) { potential=p;}

inline virtual float getWeight(size_t i ) const override { return weights[i];}
inline virtual void setWeight(size_t i,float p) override { weights[i]=p; }

inline virtual float output() const { return sum; }
inline virtual float input() const { return inputs; }
inline virtual float derivatedOutput() const { return lambda*output()*(1.0-output()); }
protected:
void addLayer(int neurons);
float &potential;
float *weights;
float &sum;
float &inputs;
float lambda;
private:
};
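Note (sketch, not part of the commit): derivatedOutput() above relies on the identity f'(x) = lambda*f(x)*(1-f(x)) for the logistic f(x) = 1/(1+exp(-lambda*x)). A quick standalone check against a finite difference:

#include <cmath>
#include <cstdio>

int main()
{
    const float lambda = 0.8f, x = 0.3f, h = 1e-3f;
    auto f = [&](float v) { return 1.0f / (1.0f + std::exp(-lambda * v)); };
    float analytic = lambda * f(x) * (1.0f - f(x));      // the identity used above
    float numeric  = (f(x + h) - f(x - h)) / (2.0f * h); // central difference
    std::printf("analytic=%f numeric=%f\n", analytic, numeric); // both ~0.19...
}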

class FFLayer: public Layer
{
public:
FFLayer(size_t s, float *p,float **w,float *su,float *in,float lam): neurons(nullptr),layerSize(s),potentials(p),weights(w),sums(su),inputs(in),lambda(lam) {}
~FFLayer();

FFLayer(const FFLayer &) = delete;
FFLayer& operator=(const FFLayer &) = delete;

// inline virtual Neuron& operator[](size_t layer) override {return operator[](layer);};
virtual FFNeuron& operator[](size_t layer) override;
inline virtual size_t size() const override {return layerSize;};
protected:
FFNeuron **neurons;
size_t layerSize;
float *potentials;
float **weights;
float *sums;
float *inputs;
float lambda;
};

class FeedForward:public ACyclicNetwork
{
public:
FeedForward(std::initializer_list<int> s, double lam=Shin::NeuronNetwork::lambda);
FeedForward(const FeedForward &f) = delete; //TODO
FeedForward operator=(const FeedForward &f)=delete;
virtual ~FeedForward();
virtual Solution solve(const Problem& p) override;
virtual size_t size() const override { return layers;};
virtual FFLayer& operator[](size_t l) override;
void setThreads(unsigned t) {threads=t;}
protected:
void solvePart(float *newSolution, size_t begin, size_t end,size_t prevSize, float* sol,size_t layer);
private:
Layer* first;
Layer* last ;
std::vector<Layer*> layers;
FFLayer **ffLayers=nullptr;
float ***weights=nullptr;
float **potentials=nullptr;
public:
float **sums=nullptr;
float **inputs=nullptr;
private:
size_t *layerSizes=nullptr;
size_t layers;
unsigned threads=1;
};

}

@@ -1 +0,0 @@
./FeedForwardQuick.h
@@ -1,183 +0,0 @@
#include "FeedForwardQuick"
|
||||
#include <pmmintrin.h>
|
||||
|
||||
using namespace Shin::NeuronNetwork;
|
||||
|
||||
FFLayer::~FFLayer()
|
||||
{
|
||||
if(neurons!=nullptr)
|
||||
{
|
||||
for(size_t i=0;i<layerSize;i++)
|
||||
{
|
||||
delete neurons[i];
|
||||
}
|
||||
delete[] neurons;
|
||||
}
|
||||
}
|
||||
|
||||
FFNeuron* FFLayer::operator[](int neuron)
|
||||
{
|
||||
if(neurons==nullptr)
|
||||
{
|
||||
neurons=new FFNeuron*[layerSize];
|
||||
for(size_t i=0;i<layerSize;i++)
|
||||
{
|
||||
neurons[i]=new FFNeuron(potentials[i],weights[i],sums[i],inputs[i],lambda);
|
||||
}
|
||||
}
|
||||
return neurons[neuron];
|
||||
}
|
||||
|
||||
FeedForwardNetworkQuick::~FeedForwardNetworkQuick()
|
||||
{
|
||||
if(weights != nullptr)
|
||||
{
|
||||
for(size_t i=0;i<layers;i++)
|
||||
{
|
||||
for (size_t j=1;j<layerSizes[i];j++)
|
||||
{
|
||||
delete[] weights[i][j];
|
||||
}
|
||||
delete[] weights[i];
|
||||
delete[] potentials[i];
|
||||
delete[] sums[i];
|
||||
delete[] inputs[i];
|
||||
}
|
||||
delete[] sums[layers];
|
||||
delete[] weights;
|
||||
delete[] potentials;
|
||||
delete[] layerSizes;
|
||||
delete[] sums;
|
||||
delete[] inputs;
|
||||
}
|
||||
if(ffLayers !=nullptr)
|
||||
{
|
||||
for(size_t i=0;i<layers;i++)
|
||||
{
|
||||
delete ffLayers[i];
|
||||
}
|
||||
delete[] ffLayers;
|
||||
}
|
||||
}
|
||||
|
||||
void FeedForwardNetworkQuick::solvePart(float *newSolution, register size_t begin, size_t end,size_t prevSize, float *sol,size_t layer)
|
||||
{
|
||||
if(prevSize >4)
|
||||
{
|
||||
__m128 partialSolution;
|
||||
__m128 w;
|
||||
__m128 sols;
|
||||
__m128 temporaryConst1=_mm_set1_ps(1.0);
|
||||
__m128 temporaryConstLambda=_mm_set1_ps(-lambda);
|
||||
register size_t alignedPrev=prevSize>16?(prevSize-(prevSize%16)):0;
|
||||
for( size_t j=begin;j<end;j++)
|
||||
{
|
||||
partialSolution= _mm_setzero_ps();
|
||||
w=_mm_setzero_ps();
|
||||
for(register size_t k=alignedPrev;k<prevSize;k++)
|
||||
{
|
||||
w = _mm_load_ss(this->weights[layer][j]+k);
|
||||
sols = _mm_load_ss(sol+k);
|
||||
w=_mm_mul_ps(w,sols);
|
||||
partialSolution=_mm_add_ps(partialSolution,w);
|
||||
// w=_mm_shuffle_ps(w,w,3*2^0+0*2^2+1*2^4+2*2^6);
|
||||
// sols=_mm_shuffle_ps(sols,sols,3*2^0+0*2^2+1*2^4+2*2^6);
|
||||
}
|
||||
for(register size_t k=0;k<alignedPrev;k+=4)
|
||||
{
|
||||
w = _mm_load_ps(this->weights[layer][j]+k);
|
||||
//_mm_prefetch((char*)this->weights[layer][j]+k+4,_MM_HINT_T0);
|
||||
sols = _mm_load_ps(sol+k);
|
||||
w=_mm_mul_ps(w,sols);
|
||||
partialSolution=_mm_add_ps(partialSolution,w);
|
||||
}
|
||||
/* pre-SSE3 solution
|
||||
__m128 temp = _mm_add_ps(_mm_movehl_ps(foo128, foo128), foo128);
|
||||
float x;
|
||||
_mm_store_ss(&x, _mm_add_ss(temp, _mm_shuffle_ps(temp, 1)));
|
||||
*/
|
||||
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
|
||||
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
|
||||
_mm_store_ss(inputs[layer]+j,partialSolution);
|
||||
partialSolution=_mm_mul_ps(temporaryConstLambda,partialSolution); //-lambda*sol[k]
|
||||
partialSolution=exp_ps(partialSolution); //exp(sols)
|
||||
partialSolution= _mm_add_ps(partialSolution,temporaryConst1); //1+exp()
|
||||
partialSolution= _mm_div_ps(temporaryConst1,partialSolution);//1/....*/
|
||||
_mm_store_ss(newSolution+j,partialSolution);
|
||||
}
|
||||
}else
|
||||
{
|
||||
for( size_t j=begin;j<end;j++)
|
||||
{
|
||||
register float tmp=0;
|
||||
for(register size_t k=0;k<prevSize;k++)
|
||||
{
|
||||
tmp+=sol[k]*weights[layer][j][k];
|
||||
}
|
||||
newSolution[j]=(1.0/(1.0+exp(-lambda*tmp)));
|
||||
inputs[layer][j]=tmp;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Solution FeedForwardNetworkQuick::solve(const Problem& p)
|
||||
{
|
||||
register float* sol=sums[1];
|
||||
|
||||
sums[0][0]=1;
|
||||
sol[0]=1;
|
||||
for(size_t i=0;i<p.size();i++)
|
||||
{
|
||||
sums[0][i+1]=p[i];
|
||||
sol[i+1]=p[i];
|
||||
}
|
||||
|
||||
register size_t prevSize=layerSizes[0];
|
||||
for(register size_t i=1;i<layers;i++)
|
||||
{
|
||||
float* newSolution= sums[i+1];
|
||||
if(threads > 1 && (layerSizes[i] > 700 ||prevSize > 700)) // 700 is an guess about actual size, when creating thread has some speedup
|
||||
{
|
||||
std::vector<std::thread> th;
|
||||
size_t s=1;
|
||||
size_t step =layerSizes[i]/threads;
|
||||
for(size_t t=1;t<=threads;t++)
|
||||
{
|
||||
//TODO do i need it to check?
|
||||
if(s>=layerSizes[i])
|
||||
break;
|
||||
th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{
|
||||
solvePart(newSolution,from,to,prevSize,sol,i);
|
||||
},s,t==threads?layerSizes[i]:s+step));//{}
|
||||
s+=step;
|
||||
}
|
||||
|
||||
for (auto& thr : th)
|
||||
thr.join();
|
||||
}else
|
||||
{
|
||||
solvePart(newSolution,1,layerSizes[i],prevSize,sol,i);
|
||||
}
|
||||
prevSize=layerSizes[i];
|
||||
sol=newSolution;
|
||||
}
|
||||
Solution ret;
|
||||
for(size_t i=1;i<prevSize;i++)
|
||||
{
|
||||
ret.push_back(sol[i]);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
FFLayer* FeedForwardNetworkQuick::operator[](int l)
|
||||
{
|
||||
if(ffLayers==nullptr)
|
||||
{
|
||||
ffLayers=new FFLayer*[layers];
|
||||
for(size_t i=0;i<layers;i++)
|
||||
{
|
||||
ffLayers[i]=new FFLayer(layerSizes[i],potentials[i],weights[i],sums[i+1],inputs[i],lambda);
|
||||
}
|
||||
}
|
||||
return ffLayers[l];
|
||||
}
@@ -1,138 +0,0 @@
#ifndef _S_NN_FFQ_H_
#define _S_NN_FFQ_H_

#include "Problem"
#include "Solution"
#include "Neuron"
#include "FeedForward"

#include <vector>
#include <initializer_list>
#include <thread>

#include <iostream>
#include <math.h>

#include <mmintrin.h>
#include <xmmintrin.h>
#include <emmintrin.h>
#include <xmmintrin.h>

#include "../sse_mathfun.h"

#define LAMBDA 0.8

namespace Shin
{
namespace NeuronNetwork
{
class FFNeuron : public Neuron
{
public:
FFNeuron() = delete;
FFNeuron(const FFNeuron&) = delete;
FFNeuron& operator=(const FFNeuron&) = delete;
FFNeuron(float &pot, float *w, float &s, float &i,float lam):potential(pot),weights(w),sum(s),inputs(i),lambda(lam) { }

float getPotential() {return potential;}
void setPotential(float p) { potential=p;}
float getWeight(unsigned int i ) { return weights[i];}
void setWeight(unsigned int i,float p) { weights[i]=p; }
inline float output() const { return sum; }
inline float input() const { return inputs; }
inline float derivatedOutput() const { return lambda*output()*(1.0-output()); }
protected:
float &potential;
float *weights;
float &sum;
float &inputs;
float lambda;
private:
};

class FFLayer//: public Layer
{
public:
FFLayer(const FFLayer &) =delete;
FFLayer operator=(const FFLayer &) = delete;
FFLayer(size_t s, float *p,float **w,float *su,float *in,float lam): neurons(nullptr),layerSize(s),potentials(p),weights(w),sums(su),inputs(in),lambda(lam) {}
~FFLayer();
FFNeuron* operator[](int neuron);
size_t size() const {return layerSize;};
protected:
FFNeuron **neurons;
size_t layerSize;
float *potentials;
float **weights;
float *sums;
float *inputs;
float lambda;
};

class FeedForwardNetworkQuick:public ACyclicNetwork
{
public:
FeedForwardNetworkQuick(const FeedForwardNetworkQuick &f) = delete; //TODO
FeedForwardNetworkQuick operator=(const FeedForwardNetworkQuick &f)=delete;
template<typename... Args>inline FeedForwardNetworkQuick(std::initializer_list<int> s, double lam=LAMBDA):ffLayers(nullptr),weights(nullptr),potentials(nullptr),sums(nullptr),inputs(nullptr),layerSizes(nullptr),layers(s.size()),lambda(lam)
{
weights= new float**[s.size()];
potentials= new float*[s.size()];
layerSizes= new size_t[s.size()];
sums= new float*[s.size()+1];
inputs= new float*[s.size()];
int i=0;
int prev_size=1;
for(int layeSize:s) // TODO rename
{
layeSize+=1;
if(i==0)
{
prev_size=layeSize;
sums[0]= new float[layeSize];
sums[0][0]=1.0;
}
layerSizes[i]=layeSize;
weights[i]= new float*[layeSize];
potentials[i]= new float[layeSize];
sums[i+1]= new float[layeSize];
inputs[i]= new float[layeSize];
potentials[i][0]=1.0;
sums[i+1][0]=1.0;
for (int j=1;j<layeSize;j++)
{
potentials[i][j]=1.0;
weights[i][j]= new float[prev_size];
for(int k=0;k<prev_size;k++)
{
weights[i][j][k]=1.0-((float)(rand()%2001))/1000.0;
}
}
i++;
prev_size=layeSize;
}
}
virtual ~FeedForwardNetworkQuick();
virtual Solution solve(const Problem& p) override;
unsigned size() { return layers;}
FFLayer* operator[](int l);
void setThreads(unsigned t) {threads=t;}
protected:
void solvePart(float *newSolution, size_t begin, size_t end,size_t prevSize, float* sol,size_t layer);
private:
FFLayer **ffLayers;
float ***weights;
float **potentials;
public:
float **sums;
float **inputs;
private:
size_t *layerSizes;
size_t layers;
float lambda;
unsigned threads=1;
};

}
}
#endif
@@ -1,7 +1,7 @@
#include "./BackPropagation"
#include <thread>

Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForwardNetworkQuick &n): Supervised(n)
Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForward &n): Supervised(n)
{

}
@@ -24,15 +24,15 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro
deltas=new float*[network.size()];
for(size_t i=0;i<network.size();i++)
{
deltas[i]=new float[network[i]->size()];
deltas[i]=new float[network[i].size()];
deltas[i][0]=0.0;
}
}

for(size_t j=1;j<network[network.size()-1]->size();j++)
for(size_t j=1;j<network[network.size()-1].size();j++)
{
deltas[network.size()-1][j]= correction(expectation[j-1],network[network.size()-1]->operator[](j)->output())
*network[network.size()-1]->operator[](j)->derivatedOutput();
deltas[network.size()-1][j]= correction(expectation[j-1],network[network.size()-1][j].output())
*network[network.size()-1][j].derivatedOutput();
}

for(int i=(int)network.size()-2;i>0;i--)
@@ -43,51 +43,50 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro
size_t s=0;
//TODO THIS IS NOT WORKING!!!
#define THREADS 4
int step =network[i]->size()/THREADS;
int step =network[i].size()/THREADS;
for(int t=1;t<=THREADS;t++)
{
if(s>=network[i]->size())
if(s>=network[i].size())
break;
th.push_back(std::thread([&i,this](size_t from, size_t to)->void{
for(size_t j=from;j<to;j++)
{
register float deltasWeight = 0;
for(size_t k=1;k<this->network[i+1]->size();k++)
for(size_t k=1;k<this->network[i+1].size();k++)
{
deltasWeight+=deltas[i+1][k]*this->network[i+1]->operator[](k)->getWeight(j);
deltasWeight+=deltas[i+1][k]*this->network[i+1][k].getWeight(j);
}
//deltas[i][j]*=this->network[i]->operator[](j)->derivatedOutput(); // WHY THE HELL IS SEQ here??
}
},s,t==THREADS?network[i]->size():s+step));//{}
},s,t==THREADS?network[i].size():s+step));//{}
s+=step;
}
for (auto& thr : th)
thr.join();
}else
{
for(size_t j=0;j<network[i]->size();j++)
for(size_t j=0;j<network[i].size();j++)
{
register float deltasWeight = 0;
for(size_t k=1;k<this->network[i+1]->size();k++)
for(size_t k=1;k<this->network[i+1].size();k++)
{
deltasWeight+=deltas[i+1][k]*this->network[i+1]->operator[](k)->getWeight(j);
deltasWeight+=deltas[i+1][k]*this->network[i+1][k].getWeight(j);
}
deltas[i][j]=deltasWeight*this->network[i]->operator[](j)->derivatedOutput();
deltas[i][j]=deltasWeight*this->network[i][j].derivatedOutput();
}
}
}

for(size_t i=1;i<network.size();i++)
{
size_t max=network[i-1]->size();
size_t max=network[i-1].size();

for(size_t j=1;j<network[i]->size();j++)
for(size_t j=1;j<network[i].size();j++)
{
network[i]->operator[](j)->setWeight(0,network[i]->operator[](j)->getWeight(0)+deltas[i][j]*learningCoeficient);
network[i][j].setWeight(0,network[i][j].getWeight(0)+deltas[i][j]*learningCoeficient);
for(size_t k=1;k<max;k++)
{
network[i]->operator[](j)->setWeight(k,
network[i]->operator[](j)->getWeight(k)+learningCoeficient* deltas[i][j]*network[i-1]->operator[](k)->output());
network[i][j].setWeight(k, network[i][j].getWeight(k)+learningCoeficient*deltas[i][j]*network[i-1][k].output());
}
}
}
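Note (not part of the commit): the two setWeight calls above are the standard delta rule, w_jk <- w_jk + learningCoeficient * deltas[i][j] * output_k, with the bias weight (k = 0) updated against an implicit unit input.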
@@ -112,8 +111,6 @@ float Shin::NeuronNetwork::Learning::BackPropagation::teach(const Shin::NeuronNe
propagate(solution);
}


// std::cerr << "error: " << error << "\n";
return error;
}

@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Solution.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"
#include "Supervised"

/*
@@ -29,7 +29,7 @@ namespace Learning
class BackPropagation : public Supervised
{
public:
BackPropagation(FeedForwardNetworkQuick &n);
BackPropagation(FeedForward &n);
virtual ~BackPropagation();

BackPropagation(const Shin::NeuronNetwork::Learning::BackPropagation&) =delete;

@@ -1,6 +1,6 @@
#include "./OpticalBackPropagation"

Shin::NeuronNetwork::Learning::OpticalBackPropagation::OpticalBackPropagation(FeedForwardNetworkQuick &n): BackPropagation(n)
Shin::NeuronNetwork::Learning::OpticalBackPropagation::OpticalBackPropagation(FeedForward &n): BackPropagation(n)
{
setEntropySize(100);
}

@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Solution.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"
#include "BackPropagation"

/*
@@ -23,7 +23,7 @@ namespace Learning
class OpticalBackPropagation : public BackPropagation
{
public:
OpticalBackPropagation(FeedForwardNetworkQuick &n);
OpticalBackPropagation(FeedForward &n);
protected:
virtual float correction(float expected, float computed) override;
};

@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Problem.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"
#include "BackPropagation"
#include "Unsupervised"
#include "RL/QFunction.h"

@@ -75,7 +75,7 @@ void Shin::NeuronNetwork::RL::QFunctionNetwork::initialiseNetwork(size_t input,
{
if(function == nullptr)
{
function = new FeedForwardNetworkQuick({(int)input,(int)size,(int)choices});
function = new FeedForward({(int)input,(int)size,(int)choices});
b= new Learning::BackPropagation(*function);
}
}
@@ -107,7 +107,7 @@ void Shin::NeuronNetwork::RL::QFunctionNetwork::learnDelayed(std::vector< std::p
solSize=function->solve(p[0].first).size();
if (!solSize)
return;
for(int i=0;i<p.size();i++)
for(size_t i=0;i<p.size();i++)
{
Solution s;
for(int j=0;j<solSize;j++)
@@ -143,7 +143,7 @@ void Shin::NeuronNetwork::RL::QFunctionNetwork::learn(Shin::NeuronNetwork::Solut
}
}

void Shin::NeuronNetwork::RL::QFunctionNetwork::learn(Shin::NeuronNetwork::Problem& p, int choice, float quality)
void Shin::NeuronNetwork::RL::QFunctionNetwork::learn(Shin::NeuronNetwork::Problem& p, int, float quality)
{
if(quality>0)
{

@@ -2,7 +2,7 @@
#define _Q_FUNCTION_H_

#include "../../Solution"
#include "../../FeedForwardQuick"
#include "../../FeedForward"
#include "../BackPropagation.h"
#include "../OpticalBackPropagation.h"
#include <map>
@@ -85,7 +85,7 @@ namespace RL
void opticalBackPropagation() {delete b; b=new Learning::OpticalBackPropagation(*function);};
private:
Learning::BackPropagation *b;
FeedForwardNetworkQuick * function;
FeedForward * function;
float learningA=0.05;
float learningB=0.008;
};

@@ -1,6 +1,6 @@
#include "./Reinforcement"

Shin::NeuronNetwork::Learning::Reinforcement::Reinforcement(Shin::NeuronNetwork::FeedForwardNetworkQuick& n): Unsupervised(n), p(new BackPropagation(n))
Shin::NeuronNetwork::Learning::Reinforcement::Reinforcement(Shin::NeuronNetwork::FeedForward& n): Unsupervised(n), p(new BackPropagation(n))
{
p->setLearningCoeficient(1);
}

@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Problem.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"
#include "BackPropagation"
#include "Unsupervised"
#include "functional"
@@ -40,7 +40,7 @@ namespace Learning
class Reinforcement : public Unsupervised
{
public:
Reinforcement(FeedForwardNetworkQuick &n);
Reinforcement(FeedForward &n);
~Reinforcement();
Reinforcement(const Reinforcement&) =delete;
Reinforcement& operator=(const Reinforcement&) =delete;

@@ -1,5 +1,5 @@
#include "./Supervised"
Shin::NeuronNetwork::Learning::Supervised::Supervised(Shin::NeuronNetwork::FeedForwardNetworkQuick& n) :network(n)
Shin::NeuronNetwork::Learning::Supervised::Supervised(Shin::NeuronNetwork::FeedForward& n) :network(n)
{

}

@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Solution.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"

namespace Shin
{
@@ -17,7 +17,7 @@ namespace Learning
{
public:
Supervised() =delete;
Supervised(FeedForwardNetworkQuick &n);
Supervised(FeedForward &n);
virtual ~Supervised() {};
float calculateError(const Solution &expectation,const Solution &solution);
virtual float teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution)=0;
@@ -25,7 +25,7 @@ namespace Learning
void debugOn();
void debugOff();
protected:
FeedForwardNetworkQuick &network;
FeedForward &network;
bool debug=0;
};
}

@@ -1,6 +1,6 @@
#include "./Unsupervised"

Shin::NeuronNetwork::Learning::Unsupervised::Unsupervised(Shin::NeuronNetwork::FeedForwardNetworkQuick& n) :network(n)
Shin::NeuronNetwork::Learning::Unsupervised::Unsupervised(Shin::NeuronNetwork::FeedForward& n) :network(n)
{

}

@@ -5,7 +5,7 @@
#include <cstddef>

#include "../Solution.h"
#include "../FeedForwardQuick.h"
#include "../FeedForward.h"

namespace Shin
{
@@ -17,12 +17,12 @@ namespace Learning
{
public:
Unsupervised() =delete;
Unsupervised(FeedForwardNetworkQuick &n);
Unsupervised(FeedForward &n);
virtual ~Unsupervised() {};
void debugOn();
void debugOff();
protected:
FeedForwardNetworkQuick &network;
FeedForward &network;
bool debug=0;
};
}

@@ -1,4 +1,5 @@
OBJFILES= Neuron.o Network.o FeedForward.o FeedForwardQuick.o\
OBJFILES=\
FeedForward.o\
Learning/Supervised.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o\
Learning/Unsupervised.o Learning/Reinforcement.o Learning/RL/QFunction.o Learning/QLearning.o\
Solution.o Problem.o ./IO.o
@@ -16,7 +17,7 @@ lib: $(LIBNAME).so $(LIBNAME).a
$(LIBNAME).so: $(OBJFILES)
	$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so

$(LIBNAME).a: $(OBJFILES)
$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h
	rm -f $(LIBNAME).a # create new library
	ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
	ranlib $(LIBNAME).a

@@ -1,36 +0,0 @@
#include "Network"

using namespace Shin::NeuronNetwork;

Layer::Layer(int a):neurons()
{
while(a--)
{
neurons.push_back(new Neuron());
}
}

Layer::~Layer()
{
for(Neuron *n:neurons)
{
delete n;
}
}

Solution Layer::solve(const std::vector<float> &input)
{
Solution ret;
for(Neuron *n:neurons)
{
ret.push_back(n->output(input));
}
return ret;
}

Neuron* Layer::operator[](int neuron) const
{
return neurons[neuron];
}

@@ -10,46 +10,44 @@
#include <initializer_list>

#include <iostream>

namespace Shin
{
namespace NeuronNetwork
{
const float lambda=0.8;
class Layer
{
public:
virtual ~Layer() {};
virtual Neuron& operator[](size_t neuron)=0;
virtual size_t size() const=0;
};

class Network
{
public:
inline Network(double lam):lambda(lam) {};
virtual ~Network() {};

virtual Solution solve(const Problem&)=0;
virtual Layer& operator[](size_t layer)=0;
inline float getLambda() const {return lambda;}

protected:
float lambda;
private:
};

class ACyclicNetwork : public Network
{
public:
inline ACyclicNetwork(double lam):Network(lam) {};
virtual size_t size() const=0;
protected:
private:
};

class Layer
{
public:
Layer(int a);
Layer(const Layer &l):neurons()
{
for(unsigned i=0;i<l.neurons.size();i++)
{
neurons.push_back(new Neuron(*l.neurons[i]));
}
}
~Layer();
Solution solve(const std::vector<float> &input);
Neuron* operator[](int neuron) const;
int size() const {return neurons.size();};
protected:
std::vector<Neuron*> neurons;
};

}
}
#endif
@@ -1,51 +0,0 @@
#include "./Neuron"

using namespace Shin::NeuronNetwork;

Neuron::Neuron(): potential(1),weights()
{

}
float Neuron::getPotential() const
{
return potential;
}

void Neuron::setPotential(float p)
{
potential=p;
}

float Neuron::getWeight(unsigned int i) const
{
if(i >= weights.size())
{
return 1.0;
}
return weights[0];
}

void Neuron::setWeight(unsigned int i,float p)
{
if(i >= weights.size())
{
// std::cout << "resize to " << i;
weights.resize(i+1);
}
// std::cerr << "Set " << i << " to " << p << "\n";
weights[i]=p;
}

float Neuron::output(std::vector<float> input)
{
register float sum=0;
for(unsigned int i=0;i<input.size();i++)
{
// std::cerr << "W: " << getWeight(i) <<"\n";
sum+=getWeight(i)*input[i];
}
return 1.0/(1.0+exp(-0.5*sum));
if(sum <= getPotential())
return 0;
return 1;
}
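Note (not part of the commit): the two statements after the return above were unreachable; the sigmoid with the hard-coded coefficient 0.5 had already replaced the old threshold-against-potential behaviour in this now-deleted file.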
@@ -1,8 +1,7 @@
#ifndef _S_NN_NEURON_H_
#define _S_NN_NEURON_H_

#include <vector>
#include <math.h>
#include <cstdarg>

namespace Shin
{
@@ -11,26 +10,18 @@ namespace NeuronNetwork
class Neuron
{
public:
Neuron();
Neuron(const Neuron &n):potential(n.potential),weights(n.weights)
{

}
float getPotential() const;
void setPotential(float p);
float getWeight(unsigned int) const;
void setWeight(unsigned int i,float p);
float output(const std::vector<float>);
float output() { return lastOutput;}
Neuron() {};
virtual ~Neuron() {};
virtual float getPotential() const =0;
virtual void setPotential(float p) =0;

virtual float getWeight(size_t) const =0;
virtual void setWeight(size_t i,float p) =0;

virtual float output() const =0;
virtual float input() const=0;
virtual float derivatedOutput() const=0;
protected:
double potential;
private:
std::vector<float> weights;
float lastOutput=0.0;
float lastInput=0.0;
};
class SimpleNeuron: public Neuron
{
};
}
}

@@ -1 +0,0 @@
1
@@ -2,16 +2,19 @@ include ../Makefile.const

OPTIMALIZATION=
LIB_DIR = ../lib
GEN_TESTS=g-01 g-02
NN_TESTS=\
#GEN_TESTS=g-01 g-02

NN_TESTEABLE=\
nn-01 nn-02 nn-03 nn-bp-sppeed \
nn-bp-xor \
nn-obp-xor \
nn-rl-xor nn-rl-and nn-rl-qfun\
nn-reinforcement nn-04
# nn-test nn-rl-qfun\
nn-rl-xor nn-rl-and nn-rl-xor2\
nn-reinforcement nn-04 \
nn-pong

ALL_TESTS=$(NN_TESTS) $(GEN_TESTS)
NN_TESTS= $(NN_TESTEABLE) nn-pong

ALL_TESTS=$(NN_TESTEABLE) $(GEN_TESTS)

LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuronNetwork.a
#LIBS=-lGenetics.so -lNeuronNetwork
@@ -22,6 +25,7 @@ all:| lib $(ALL_TESTS);

gen: $(GEN_TESTS)

test: all
	@for i in $(ALL_TESTS); do echo -n ./$$i; echo -n " - "; ./$$i; echo ""; done

@@ -31,6 +35,9 @@ g-%: g-%.cpp $(LIB_DIR)/Genetics.a
nn-%: nn-%.cpp $(LIB_DIR)/NeuronNetwork.a
	$(CXX) $(CXXFLAGS) -o $@ $< $(LIB_DIR)/NeuronNetwork.a -lm

nn-pong: ./nn-pong.cpp $(LIB_DIR)/NeuronNetwork.a
	$(CXX) $(CXXFLAGS) -o $@ $< $(LIB_DIR)/NeuronNetwork.a -lm -lalleg -lGL

lib:
	make -C ../

@@ -1,5 +1,5 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"

#include <iostream>
@@ -27,7 +27,7 @@ int main(int argc,char**)
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<bool>({1})));

Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,5000,5000,15000,2});
Shin::NeuronNetwork::FeedForward q({1,5000,5000,15000,2});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
if(argc > 1)
{

@@ -1,6 +1,6 @@

#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForwardQuick.h"
#include "../src/NeuronNetwork/FeedForward.h"

#include <iostream>

@@ -15,24 +15,24 @@ class X: public Shin::NeuronNetwork::Problem

int main()
{
Shin::NeuronNetwork::FeedForwardNetwork n({2,4,2});
Shin::NeuronNetwork::FeedForwardNetworkQuick nq({2,4,2});
if(n[1]->size() != 4)
Shin::NeuronNetwork::FeedForward n({2,4,2});
Shin::NeuronNetwork::FeedForward nq({2,4,2});
if(n[1].size() != 4)
{
std::cout << "Actual size:" << n[0]->size();
std::cout << "Actual size:" << n[0].size();
return 1;
}
if(nq[1]->size() != 4)
if(nq[1].size() != 4)
{
std::cout << "QUICK Actual size:" << nq[0]->size();
std::cout << "QUICK Actual size:" << nq[0].size();
return 1;
}

n[2]->operator[](0)->setPotential(25);
nq[2]->operator[](0)->setPotential(25);
n[2][0].setPotential(25);
nq[2][0].setPotential(25);

std::cout << "Potential: " << n[2]->operator[](0)->getPotential() << "\n";
std::cout << "Potential: " << nq[2]->operator[](0)->getPotential() << "\n";
std::cout << "Potential: " << n[2][0].getPotential() << "\n";
std::cout << "Potential: " << nq[2][0].getPotential() << "\n";

Shin::NeuronNetwork::Solution s =n.solve(X());
Shin::NeuronNetwork::Solution sq =nq.solve(X());
@@ -51,8 +51,8 @@ int main()
return 1;
}
}
n[2]->operator[](0)->setWeight(0,26.0);
nq[2]->operator[](0)->setWeight(0,26.0);
n[2][0].setWeight(0,26.0);
nq[2][0].setWeight(0,26.0);

s =n.solve(X());
sq =n.solve(X());

@@ -1,5 +1,5 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"

#include <iostream>
@@ -33,7 +33,7 @@ int main()
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<float>({1,1})));

Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,4,1});
Shin::NeuronNetwork::FeedForward q({2,4,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
b.setLearningCoeficient(10);

@@ -1,20 +1,16 @@
#include "../src/NeuronNetwork/Network"
#include "../src/NeuronNetwork/FeedForward"

#include <iostream>
class X: public Shin::NeuronNetwork::Problem
{
public: X(bool x,bool y):x(x),y(y) {}
protected: std::vector<float> representation() const { return std::vector<float>({x,y}); }
private:
bool x;
bool y;
public: X(bool x,bool y):Problem() {data.push_back(x);data.push_back(y);}
};

int main()
{
srand(time(NULL));
int lm=5;
Shin::NeuronNetwork::FeedForwardNetwork net({2,lm,1});
Shin::NeuronNetwork::FeedForward net({2,lm,1});
bool x=1;
int prev_err=0;
int err=0;
@@ -42,10 +38,10 @@ int main()
w=rand()%2;
if(l==2)
n=0;
pot=net[l]->operator[](n)->getPotential();
net[l]->operator[](n)->setPotential(pot*(rand()%21+90)/100);
wei=net[l]->operator[](n)->getWeight(w);
net[l]->operator[](n)->setWeight(w,wei*(rand()%21+90)/100);
pot=net[l][n].getPotential();
net[l][n].setPotential(pot*(rand()%21+90)/100);
wei=net[l][n].getWeight(w);
net[l][n].setWeight(w,wei*(rand()%21+90)/100);

for(int i=0;i<100;i++)
{
@@ -58,10 +54,9 @@ int main()

if(err > prev_err)
{
net[l]->operator[](n)->setPotential(pot);
net[l]->operator[](n)->setWeight(w,wei);
net[l][n].setPotential(pot);
net[l][n].setWeight(w,wei);
};
// std::cout << "C: " << c << " err: " << err << " prev: "<<prev_err << "\n";
prev_err=err;
if(err <1)
x=0;

@@ -1,5 +1,5 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"

#include <iostream>
@@ -31,7 +31,7 @@ int main(int argc, char**)
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({1})));

Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,5000,5000,5000,1});
Shin::NeuronNetwork::FeedForward q({1,5000,5000,5000,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);

if(argc >1)

@@ -1,4 +1,4 @@
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"

#include <iostream>
@@ -16,7 +16,7 @@ int main()

for (int test=0;test<2;test++)
{
Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,3,1});
Shin::NeuronNetwork::FeedForward q({2,3,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);

srand(time(NULL));
@@ -67,5 +67,13 @@ int main()
if(err <0.001)
break;
}
for(auto a:p)
{
delete a;
}
for(auto a:s)
{
delete a;
}
}
}
@@ -1,4 +1,4 @@
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/OpticalBackPropagation"

#include <iostream>
@@ -16,7 +16,7 @@ int main()

for (int test=0;test<2;test++)
{
Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,40,1});
Shin::NeuronNetwork::FeedForward q({2,40,1});
Shin::NeuronNetwork::Learning::OpticalBackPropagation b(q);

srand(time(NULL));

tests/nn-pong.cpp (new file, 344 lines)
@@ -0,0 +1,344 @@
#include <allegro.h>
#include <cstdlib>
#include <time.h>
#include "../src/NeuronNetwork/Learning/QLearning.h"
#include <sys/time.h>

int learningGames=6000;

int ball_x = 320;
int ball_y = 240;

int ball_tempX = 320;
int ball_tempY = 240;

int p1_x = 20;
int p1_y = 210;

int p1_tempX = 20;
int p1_tempY = 210;

int p2_x = 620;
int p2_y = 210;

int p2_tempX = 620;
int p2_tempY = 210;

int i=0;

long game=0;
int q=0;
int speed=1;

bool randomLearner=0;

int dir; //This will keep track of the circles direction
//1= up and left, 2 = down and left, 3= up and right, 4 = down and right

BITMAP *buffer; //This will be our temporary bitmap for double buffering

class X: public Shin::NeuronNetwork::Problem
{
public:
X(int p1,int ballX,int ballY,int p2)//, int ballY)
{
data.push_back((float)p1/480.0);
data.push_back((float)ballX/640.0);
data.push_back((float)ballY/480.0);
}
};

Shin::NeuronNetwork::Learning::QLearning l(3,15,3);

std::vector <std::pair<Shin::NeuronNetwork::Problem,int>> p1x;

void propagateOKtoP1(double quality=10)
{
l.learnDelayed(p1x,quality);
p1x.clear();
}
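Note (not part of the commit): p1x accumulates the (state, action) pairs of the current rally; propagateOKtoP1 flushes that whole trajectory through QLearning::learnDelayed with a single reward, so credit for a hit (+100) or a lost point (-100) is spread over every move that led to it.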

void moveBall(){

ball_tempX = ball_x;
ball_tempY = ball_y;

if (dir == 1 && ball_x > 5 && ball_y > 5){

if( ball_x == p1_x + 15 && ball_y >= p1_y && ball_y <= p1_y + 60){
dir = rand()% 2 + 3;
propagateOKtoP1(100);
}else{
--ball_x;
--ball_y;
}

} else if (dir == 2 && ball_x > 5 && ball_y < 475){

if( ball_x == p1_x + 15 && ball_y >= p1_y && ball_y <= p1_y + 60){
dir = rand()% 2 + 3;
propagateOKtoP1(100);
}else{
--ball_x;
++ball_y;
}

} else if (dir == 3 && ball_x < 635 && ball_y > 5){

if( ball_x + 5 == p2_x && ball_y >= p2_y && ball_y <= p2_y + 60){
dir = rand()% 2 + 1;
}else{
++ball_x;
--ball_y;
}

} else if (dir == 4 && ball_x < 635 && ball_y < 475){

if( ball_x + 5 == p2_x && ball_y >= p2_y && ball_y <= p2_y + 60){
dir = rand()% 2 + 1;
}else{
++ball_x;
++ball_y;
}

} else {

if (dir == 1 || dir == 3) ++dir;
else if (dir == 2 || dir == 4) --dir;

}

}

char p1Move(){

X p=X(p1_y,ball_x,ball_y,p2_y);

if(game <learningGames)
{
if(randomLearner)
{
register int tmp=game%3;
if(rand()%5==0)
{
tmp=(tmp+rand())%3;
}
if(tmp==1)
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,2));//,ball_tempX,ball_tempY));
return 1;
}else if(tmp==0)
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,0));//,ball_tempX,ball_tempY));
return -1;
}else
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,1));//,ball_tempX,ball_tempY));
return 0;
}
}else
{
if( p1_tempY > ball_y && p1_y > 0){
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,0));//,ball_tempX,ball_tempY));
return -1;
} else if( p1_tempY < ball_y && p1_y < 420){
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,2));//,ball_tempX,ball_tempY));
return 1;
}else
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,1));//,ball_tempX,ball_tempY));
return 0;
}
}
}
int j=l.getChoice(p);

p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,j));//,ball_tempX,ball_tempY));

return j-1;
}

char p2Move(){
if(game >= learningGames)
{
if(key[KEY_UP])
return 1;
else if( key[KEY_DOWN])
return -1;
else
return 0;
}else
{
if(rand()%10==0)
{
return (rand()%3)-1;
}
if( p2_tempY > ball_y){
return -1;
} else if( p2_tempY < ball_y){
return 1;
}
return 0;
}
}

void startNew(){

clear_keybuf();
if(game==learningGames)
textout_ex( screen, font, "Player 1 learned! Push a button to start a game.", 160, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));

if(game >= learningGames)
readkey();

clear_to_color( buffer, makecol( 0, 0, 0));
ball_x = 350;
ball_y = rand()%481;

p1_x = 20;
p1_y = 210;

p2_x = 620;
p2_y = 210;

}

void checkWin(){

int won=0;
if ( ball_x < p1_x){
won=1;
game++;
textout_ex( screen, font, "Player 2 Wins!", 320, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
propagateOKtoP1(-100);
startNew();

} else if ( ball_x > p2_x){
game++;
won=1;
textout_ex( screen, font, "Player 1 Wins!", 320, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
propagateOKtoP1(100);
startNew();
}

}

void setupGame(){

acquire_screen();
rectfill( buffer, p1_x, p1_y, p1_x + 10, p1_y + 60, makecol ( 0, 0, 255));
rectfill( buffer, p2_x, p2_y, p2_x + 10, p2_y + 60, makecol ( 0, 0, 255));
circlefill ( buffer, ball_x, ball_y, 5, makecol( 128, 255, 0));
draw_sprite( screen, buffer, 0, 0);
release_screen();
srand( time(NULL));
dir = rand() % 4 + 1;

}

int main(int argc, char**argv)
{
allegro_init();
install_keyboard();
set_color_depth(16);
set_gfx_mode( GFX_AUTODETECT_WINDOWED, 640, 480, 0, 0);

l.setLearningCoeficient(0.01,0.01);
if(argc>=4 && argv[3][0]=='o')
{
std::cerr << "USING Optical Backpropagation\n";
l.opticalBackPropagation();
}
if(argc>=3)
{
std::cerr << "Setting learning coefficients to:" << atof(argv[1]) << "," << atof(argv[2]) << "\n";
l.setLearningCoeficient(atof(argv[1]),atof(argv[2]));
}
if(argc >=5)
{
std::cerr << "Setting learning games to:" << atof(argv[4]) << "\n";
learningGames=atof(argv[4]);
}
if(argc >=6 && argv[5][0]=='r')
{
std::cerr << "Setting random learning\n";
randomLearner=1;
}
buffer = create_bitmap( 640, 480);
setupGame();
speed=51;
int sleepTime=1000;
while(!key[KEY_ESC])
{
q++;
if(key[KEY_T])
{
std::cout << "ADDING next 500 learning games\n";
usleep(500000);
learningGames+=500;
}
if(game < learningGames)
{
if( key[KEY_UP] && speed < 200){
speed+=5;
}else if( key[KEY_DOWN] && speed >1 ){
speed-=5;
}
if(speed <= 0)
{
speed=1;
}
}else
{
speed=1;
}

register char p1dir=p1Move();
register char p2dir=p2Move();

p1_tempY = p1_y;
p2_tempY = p2_y;

if(p1dir < 0 && p1_y > 0){
--p1_y;
} else if( p1dir > 0 && p1_y < 420){
++p1_y;
}
if(p2dir > 0 && p2_y > 0){
--p2_y;
} else if( p2dir < 0 && p2_y < 420){
++p2_y;
}
moveBall();
if(key[KEY_PLUS_PAD] && sleepTime >=10)
sleepTime-=50;
else if(key[KEY_MINUS_PAD] && sleepTime <=15000)
sleepTime+=50;

if(i%speed==0)
{
acquire_screen();
rectfill( buffer, p1_tempX, p1_tempY, p1_tempX + 10, p1_tempY + 60, makecol ( 0, 0, 0));
rectfill( buffer, p1_x, p1_y, p1_x + 10, p1_y + 60, makecol ( 0, 0, 255));

rectfill( buffer, p2_tempX, p2_tempY, p2_tempX + 10, p2_tempY + 60, makecol ( 0, 0, 0));
rectfill( buffer, p2_x, p2_y, p2_x + 10, p2_y + 60, makecol ( 0, 0, 255));

circlefill ( buffer, ball_tempX, ball_tempY, 5, makecol( 0, 0, 0));
circlefill ( buffer, ball_x, ball_y, 5, makecol( 128, 255, 0));
draw_sprite( screen, buffer, 0, 0);
release_screen();
usleep(sleepTime);
}
checkWin();
i++;
}

return 0;

}

END_OF_MAIN()
@@ -1,4 +1,4 @@
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement.h"
#include "../src/NeuronNetwork/Solution.h"

@@ -28,7 +28,7 @@ int main()

p.push_back(X(std::vector<float>({1,1})));

Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,6,2});
Shin::NeuronNetwork::FeedForward q({2,6,2});
Shin::NeuronNetwork::Learning::Reinforcement b(q);
b.getPropagator().setLearningCoeficient(1);
int i=0;

@@ -1,4 +1,4 @@
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement.h"
#include "../src/NeuronNetwork/Solution.h"

@@ -25,7 +25,7 @@ int main()
p.push_back(new X(std::vector<float>({1,0})));
p.push_back(new X(std::vector<float>({0,1})));

Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,1});
Shin::NeuronNetwork::FeedForward q({2,1});
Shin::NeuronNetwork::Learning::Reinforcement b(q);
int i=0;
double targetQuality=0.5;

@@ -1,4 +1,4 @@
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement"
#include "../src/NeuronNetwork/Learning/OpticalBackPropagation"

@@ -19,7 +19,7 @@ int main()
srand(time(NULL));
for (int test=0;test<3;test++)
{
Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,4,1});
Shin::NeuronNetwork::FeedForward q({2,4,1});
Shin::NeuronNetwork::Learning::Reinforcement b(q);
//b.setPropagator(new Shin::NeuronNetwork::Learning::OpticalBackPropagation(q));
b.getPropagator().setLearningCoeficient(0.4);

tests/nn-rl-xor2.cpp (new file, 99 lines)
@@ -0,0 +1,99 @@
#include "../src/NeuronNetwork/Learning/QLearning.h"

#include <iostream>
#include <vector>

class X: public Shin::NeuronNetwork::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<float> &a):Problem() {data=a;}
};

float atof(char *s)
{
int f, m, sign, d=1;
f = m = 0;

sign = (s[0] == '-') ? -1 : 1;
if (s[0] == '-' || s[0] == '+') s++;

for (; *s != '.' && *s; s++) {
f = (*s-'0') + f*10;
}
if (*s == '.')
for (++s; *s; s++) {
m = (*s-'0') + m*10;
d *= 10;
}
return sign*(f + (float)m/d);
}

float AA=10;
float getQuality(X& p, int action)
{
if((p[0]==0&& p[1]==0) ||(p[0]==1&& p[1]==1)) //should be 0
{
return action==1?-AA:AA;
}else // should be 1
{
return action==0?-AA:AA;
}
}
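Note (not part of the commit): getQuality pays +AA for the correct XOR output and -AA otherwise, so a perfect policy scores 4*AA per sweep over the four states; the loop below presumably stops at 3.9*AA rather than 4*AA to leave slack for float rounding.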

int main(int argc, char **argv)
{
srand(time(NULL));

Shin::NeuronNetwork::Learning::QLearning l(2,45,2);
if(argc==4 && argv[3][0]=='o')
{
std::cerr << "USING Optical Backpropagation\n";
l.opticalBackPropagation();
}
if(argc>=3)
{
std::cerr << "Setting learning coefficients to:" << atof(argv[1]) << "," << atof(argv[2]) << "\n";
l.setLearningCoeficient(atof(argv[1]),atof(argv[2]));
}
std::vector <std::pair<Shin::NeuronNetwork::Solution,Shin::NeuronNetwork::Problem>> p1x;

std::vector <X> states;
states.push_back(X(std::vector<float>({1,0})));
states.push_back(X(std::vector<float>({0,0})));
states.push_back(X(std::vector<float>({1,1})));
states.push_back(X(std::vector<float>({0,1})));

unsigned long step=0;
double quality=0;
while(step< 600000 && quality < (3.9*AA))
{
quality=0;
if(step%10000==0)
std::cerr << "STEP " << step << "\n";
for(unsigned i=0;i<states.size();i++)
{
int choice=l.getChoice(states[i]);
l.learn(states[i],choice,quality);
}
for(unsigned i=0;i<states.size();i++)
{
int choice=l.getChoice(states[i]);
quality+=getQuality(states[i],choice);
if(step%10000==0)
{
Shin::NeuronNetwork::Solution sol=l.getSolution(states[i]);
std::cerr << "\tState: [" << states[i][0] << "," << states[i][1] << "] Q-function: [" << sol[0] << "," <<sol[1] << "] Action " << choice << "\n";
}
}
step++;
}
std::cerr << step << "\n";
for(unsigned i=0;i<states.size();i++)
{
Shin::NeuronNetwork::Solution sol=l.getSolution(states[i]);
int choice=l.getChoice(states[i]);
std::cerr << "State: [" << states[i][0] << "," << states[i][1] << "] Q-function: [" << sol[0] << "," <<sol[1] << "] Action " << choice << "\n";
}
}
tests/nn-test.cpp (new file, 50 lines)
@@ -0,0 +1,50 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"

#include <iostream>
#include <vector>

//typedef Shin::NeuronNetwork::Problem X;

class X: public Shin::NeuronNetwork::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<float> &a):Problem() {for(auto q:a){ data.push_back(q);}}
protected:
};
int main(int argc,char**)
{
srand(time(NULL));
std::vector<Shin::NeuronNetwork::Solution> s;
std::vector<X> p;

p.push_back(X(std::vector<float>({0,0})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.4,0.3,0.2,0.1})));
p.push_back(X(std::vector<float>({0,0.5})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.6,0.3,0.2,0.5})));
p.push_back(X(std::vector<float>({0.4,0.5})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.4,0.4,0.2,0.8})));
Shin::NeuronNetwork::FeedForward q({2,4,4,4},1.0);
Shin::NeuronNetwork::Learning::BackPropagation bp(q);
bp.setLearningCoeficient(0.2);
for(int i=0;i<3;i++)
{
Shin::NeuronNetwork::Solution sp =q.solve(p[i]);
std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
}
for(int i=0;i<4;i++)
{
for(int j=0;j<3;j++)
{
bp.teach(p[j],s[j]);
}
}
std::cerr << "XXXXXXXXXXXX\n";
for(int i=0;i<3;i++)
{
Shin::NeuronNetwork::Solution sp =q.solve(p[i]);
std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
}
}