Code cleanup and compile-time SSE2 availability check

This commit is contained in:
2014-12-12 16:38:55 +01:00
parent b4bee6f498
commit 2daac8b652
6 changed files with 32 additions and 76 deletions

View File

@@ -5,7 +5,7 @@ CXXFLAGS+= -std=c++14
CXXFLAGS+= -pg -fPIC
CXXFLAGS+= -g
CXXFLAGS+= -fPIC -pthread
CXXFLAGS+= -DUSE_SSE
CXXFLAGS+= -DUSE_SSE2
OPTIMALIZATION = -O3 -march=native -mtune=native
%.o : %.cpp %.h

View File

@@ -45,18 +45,6 @@ void Shin::NeuronNetwork::RL::QFunctionTable::learn(Shin::NeuronNetwork::Solutio
solution->second.second++;
}
}
void Shin::NeuronNetwork::RL::QFunctionTable::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Solution, Shin::NeuronNetwork::Problem > >& p, float quality)
{
for(int i=p.size()-1;i>=0;i--)
{
auto &pair=p[i];
learn(pair.first,pair.second,quality);
quality*=0.3;
}
}
Shin::NeuronNetwork::RL::QFunctionNetwork::QFunctionNetwork() : b(nullptr),function(nullptr)
{
@@ -80,46 +68,6 @@ void Shin::NeuronNetwork::RL::QFunctionNetwork::initialiseNetwork(size_t input,
}
}
void Shin::NeuronNetwork::RL::QFunctionNetwork::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Solution, Shin::NeuronNetwork::Problem > >& p, float quality)
{
if(quality>0)
{
b->setLearningCoeficient(learningA);
// b->setLearningCoeficient(0.05);
}else
{
b->setLearningCoeficient(learningB);
// b->setLearningCoeficient(0.008);
}
for(int i=p.size()-1;i>=0;i--)
{
learn(p[i].first,p[i].second,quality);
quality*=0.95;
}
}
void Shin::NeuronNetwork::RL::QFunctionNetwork::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Problem,int> >& p, float quality) // TODO there must be better way
{
std::vector<std::pair<Solution,Problem>> q;
register int solSize=0;
if(p.size()>0)
solSize=function->solve(p[0].first).size();
if (!solSize)
return;
for(size_t i=0;i<p.size();i++)
{
Solution s;
for(int j=0;j<solSize;j++)
{
s.push_back(j==p[i].second?1:0);
}
q.push_back(std::pair<Solution,Problem>(s,p[i].first));
}
learnDelayed(q,quality);
}
void Shin::NeuronNetwork::RL::QFunctionNetwork::learn(Shin::NeuronNetwork::Solution& s, Shin::NeuronNetwork::Problem& p, float quality)
{
register int loops=abs(quality)/10;

View File

@@ -29,4 +29,4 @@ void Shin::MachineLearning::QLearning::learnDelayed(std::vector< std::pair<Shin:
learn(pair.first,pair.second,quality);
quality*=0.3;
}
}
}

View File

@@ -7,12 +7,14 @@
#include "Unsupervised.h"
#include "../NeuralNetwork/FeedForward.h"
/*#include "BackPropagation.h"
#include "OpticalBackPropagation.h"
#include "../FeedForward.h"
#include "Unsupervised.h"
#include "QFunction.h"
/*
* #include "BackPropagation.h"
* #include "OpticalBackPropagation.h"
* #include "../FeedForward.h"
* #include "Unsupervised.h"
* #include "QFunction.h"
*/
/*
* http://www2.econ.iastate.edu/tesfatsi/RLUsersGuide.ICAC2005.pdf
* http://www.autonlab.org/tutorials/rl06.pdf
@@ -38,7 +40,7 @@ namespace Shin
{
namespace MachineLearning
{
class QLearning
class QLearning: Learning
{
public:
inline QLearning() {};
@@ -50,14 +52,14 @@ namespace MachineLearning
virtual void learnDelayed(std::vector<std::pair<Problem,Solution>> &p, float quality) final;
virtual void learnDelayed(std::vector<std::pair<Problem,int>> &p, float quality) final;
virtual void learn(Problem &p,Solution &s, float quality)=0;
virtual void learn(Problem &p,int action, float quality)=0;
virtual void learn(Problem &p,Solution &s, const float& quality)=0;
virtual void learn(Problem &p,int action, const float& quality)=0;
inline virtual void setLearningCoeficient(const float& a) {setLearningCoeficient(a,a);};
inline virtual void setLearningCoeficient(const float& a) override {setLearningCoeficient(a,a);};
inline void setLearningCoeficient(const float& ok, const float& err) {learningA=ok;learningB=err;};
virtual Solution getSolution(Problem &p)=0;
int getChoice(Problem &p);
virtual int getChoice(Problem &p)=0;
protected:
float learningA=0.05;
float learningB=0.008;
@@ -67,18 +69,20 @@ namespace MachineLearning
class QLearningNetwork : public QLearning
{
public:
QLearningNetwork(size_t input, size_t size, size_t choices): QLearning(),function({input,size,choices}) {};
QLearningNetwork(std::initializer_list<size_t> s): QLearning(),function(s) {};
QLearningNetwork(size_t input, size_t size, size_t actions): QLearning(),function({input,size,actions}),actionsSize(actions) {}
QLearningNetwork(std::initializer_list<size_t> s): QLearning(),function(s),actionsSize(*s.end()) {}
QLearningNetwork(const QLearningNetwork&)=delete;
QLearningNetwork operator=(const QLearningNetwork&)=delete;
virtual void learn(Problem &p,Solution &s, float quality) override;
virtual void learn(Problem &p,int action, float quality) override;
virtual void learn(Problem &p,Solution &s, const float& quality) override;
virtual void learn(Problem &p,int action, const float& quality) override;
virtual Solution getSolution(Problem &p) override {return function.solve(p);}
virtual int getChoice(Problem &p) override;
protected:
Shin::NeuralNetwork::FeedForward function;
size_t actionsSize;
};
class QLearningTable : public QLearning
@@ -89,10 +93,11 @@ namespace MachineLearning
QLearningTable(const QLearningTable&)=delete;
QLearningTable operator=(const QLearningTable&)=delete;
virtual void learn(Problem &p,Solution &s, float quality) override;
virtual void learn(Problem &p,int action, float quality) override;
virtual void learn(Problem &p,Solution &s, const float& quality) override;
virtual void learn(Problem &p,int action, const float& quality) override;
virtual Solution getSolution(Problem &p) override;
virtual int getChoice(Problem &p) override;
protected:
std::map<Problem,std::map<int,std::pair<float,int>>> data;
};

View File

@@ -134,8 +134,13 @@ void FeedForward::solvePart(float *newSolution, register size_t begin, size_t en
float x;
_mm_store_ss(&x, _mm_add_ss(temp, _mm_shuffle_ps(temp, 1)));
*/
#ifdef USE_SSE2
partialSolution= _mm_add_ps(_mm_movehl_ps(partialSolution, partialSolution), partialSolution);
partialSolution=_mm_add_ss(partialSolution, _mm_shuffle_ps(partialSolution,partialSolution, 1));
#else
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
#endif
_mm_store_ss(inputs[layer]+j,partialSolution);
partialSolution=_mm_mul_ps(temporaryConstLambda,partialSolution); //-lambda*sol[k]
partialSolution=exp_ps(partialSolution); //exp(sols)
@@ -175,17 +180,14 @@ Shin::Solution FeedForward::solve(const Shin::Problem& p)
std::vector<std::thread> th;
size_t s=1;
size_t step =layerSizes[i]/threads;
for(size_t t=1;t<=threads;t++)
for(size_t t=1;t<threads;t++)
{
//TODO do i need it to check?
if(s>=layerSizes[i])
break;
th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{
solvePart(newSolution,from,to,prevSize,sol,i);
},s,t==threads?layerSizes[i]:s+step));//{}
},s,s+step));
s+=step;
}
solvePart(newSolution,s,layerSizes[i],prevSize,sol,i);
for (auto& thr : th)
thr.join();
}else

View File

@@ -8,6 +8,7 @@
#include <vector>
#include <initializer_list>
#include <thread>
#include <pthread.h>
#include <iostream>
#include <math.h>