modified learning algorithms: BackPropagation and OpticalBackPropagation now bind to their FeedForward::Network at construction and reuse a member deltas buffer; teach() drops the network parameter
@@ -13,13 +13,14 @@ namespace Learning {
   class BackPropagation {
 
     public:
-      BackPropagation(): learningCoefficient(0.4) {
+      inline BackPropagation(FeedForward::Network &feedForwardNetwork): network(feedForwardNetwork), learningCoefficient(0.4), deltas() {
+        resize();
       }
 
       virtual ~BackPropagation() {
       }
 
-      void teach(FeedForward::Network &n,const std::vector<float> &input, const std::vector<float> &output);
+      void teach(const std::vector<float> &input, const std::vector<float> &output);
 
       inline virtual void setLearningCoefficient (const float& coefficient) { learningCoefficient=coefficient; }
 
@@ -27,7 +28,22 @@ namespace Learning {
       inline virtual float correction(const float & expected, const float &computed) const {
         return expected-computed;
       };
+
+      inline void resize() {
+        if(deltas.size()!=network.size())
+          deltas.resize(network.size());
+
+        for(std::size_t i=0; i < network.size(); i++) {
+          if(deltas[i].size()!=network[i].size())
+            deltas[i].resize(network[i].size());
+        }
+      }
+
+      FeedForward::Network &network;
+
       float learningCoefficient;
+
+      std::vector<std::vector<float>> deltas;
   };
  }
 }
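
The net effect of the two hunks above: the trainer now holds a reference to its network plus a reusable deltas buffer, instead of receiving the network on every call. A minimal sketch of the resulting call pattern, assuming a FeedForward::Network built elsewhere (its construction API is not part of this diff):

    // Hedged sketch, not part of the commit: the trainer binds to one
    // network for its lifetime, so teach() drops the network argument.
    NeuralNetwork::FeedForward::Network n = /* constructed elsewhere */;
    n.randomizeWeights();

    NeuralNetwork::Learning::BackPropagation prop(n); // constructor calls resize()
    prop.setLearningCoefficient(0.5f);                // default is 0.4

    prop.teach({1, 0}, {1});  // was: prop.teach(n, {1, 0}, {1});
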
@@ -11,7 +11,7 @@ namespace Learning {
   class OpticalBackPropagation : public BackPropagation {
 
     public:
-      OpticalBackPropagation(): BackPropagation() {
+      OpticalBackPropagation(FeedForward::Network &feedForwardNetwork): BackPropagation(feedForwardNetwork) {
 
       }
       virtual ~OpticalBackPropagation() {
@@ -21,6 +21,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>
 #ifndef USE_FMA
   __m256 tmp;
 #endif
+
   for(size_t k=0;k<alignedPrev;k+=8) {
     //TODO: assignement!! -- possible speedup
 #ifdef USE_FMA
@@ -3,17 +3,11 @@
 #include <cassert>
 #include <immintrin.h>
 
-void NeuralNetwork::Learning::BackPropagation::teach(FeedForward::Network &network,const std::vector<float> &input, const std::vector<float> &expectation) {
+void NeuralNetwork::Learning::BackPropagation::teach(const std::vector<float> &input, const std::vector<float> &expectation) {
 
   network.computeOutput(input);
-  std::vector<std::vector<float>> deltas;
 
-  deltas.resize(network.size());
+  resize();
 
-  for(std::size_t i=0; i < network.size(); i++) {
-    deltas[i].resize(network[i].size());
-    deltas[i][0]=0.0;
-  }
-
   auto& outputLayer=network[network.size()-1];
   for(std::size_t j=1;j<outputLayer.size();j++) {
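
The loop this hunk opens (over the output layer, starting at j=1 because index 0 is the bias unit) fills the output layer's deltas from correction(), which per the header above returns expected-computed. A self-contained illustration of that standard output-delta rule; the sigmoid derivative is an assumption for the example, since the library's activation function is not shown in this diff:

    #include <cstdio>
    #include <vector>

    // Generic backpropagation output-delta step: delta = error * activation'.
    // Sigmoid is assumed purely for illustration: sigmoid'(x) = y * (1 - y).
    int main() {
        std::vector<float> expected = {1.0f};
        std::vector<float> computed = {0.73f};   // network output
        std::vector<float> delta(computed.size());

        for (std::size_t j = 0; j < computed.size(); j++) {
            float error = expected[j] - computed[j];               // correction()
            float derivative = computed[j] * (1.0f - computed[j]); // sigmoid'
            delta[j] = error * derivative;
        }
        std::printf("delta[0] = %f\n", delta[0]);  // 0.27 * 0.1971 = 0.0532...
        return 0;
    }
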
@@ -43,23 +37,6 @@ void NeuralNetwork::Learning::BackPropagation::teach(FeedForward::Network &network,const std::vector<float> &input, const std::vector<float> &expectation) {
   std::size_t prevLayerSize=prevLayer.size();
   std::size_t layerSize=layer.size();
 
-/*
-#ifdef USE_AVX
-
-  std::size_t alignedPrev=layerSize-layerSize%8;
-
-  __m256 learningCoefficientAVX=_mm256_set_ps(learningCoefficient, learningCoefficient, learningCoefficient, learningCoefficient,
-                                              learningCoefficient, learningCoefficient, learningCoefficient, learningCoefficient);
-
-  float* tmp =deltas[layerIndex].data();
-  for(std::size_t j=1;j<alignedPrev;j+=8) {
-    _mm256_storeu_ps(tmp+j,_mm256_mul_ps(learningCoefficientAVX,_mm256_loadu_ps(tmp+j)));
-  }
-  for(std::size_t j =alignedPrev; j < layerSize;j++) {
-    deltas[layerIndex][j]*=learningCoefficient;
-  }
-#endif
-*/
   for(std::size_t j=1;j<layerSize;j++) {
 
     deltas[layerIndex][j]*=learningCoefficient;
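
The block deleted above was a commented-out AVX path that scaled a layer's deltas by the learning coefficient eight floats at a time. Note its vector loop started at j=1 while the scalar tail resumed at alignedPrev, an offset computed from 0, so the two ranges could overlap and double-scale an element, which may be why it was left disabled. A standalone sketch of the intended idiom, independent of the library (compile with -mavx); _mm256_set1_ps stands in for the deleted code's eight-argument _mm256_set_ps:

    #include <immintrin.h>
    #include <cstdio>
    #include <vector>

    // Scale every element of a float vector by a coefficient, 8 lanes at a time.
    int main() {
        std::vector<float> deltas(19, 2.0f);
        float learningCoefficient = 0.4f;

        std::size_t size = deltas.size();
        std::size_t aligned = size - size % 8;              // largest multiple of 8
        __m256 coeff = _mm256_set1_ps(learningCoefficient); // broadcast scalar

        float* p = deltas.data();
        for (std::size_t j = 0; j < aligned; j += 8)        // vector body
            _mm256_storeu_ps(p + j, _mm256_mul_ps(coeff, _mm256_loadu_ps(p + j)));
        for (std::size_t j = aligned; j < size; j++)        // scalar tail
            deltas[j] *= learningCoefficient;

        std::printf("%f %f\n", deltas[0], deltas[18]);      // both print 0.8
        return 0;
    }
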
@@ -13,12 +13,12 @@ int main() {
 
   n.randomizeWeights();
 
-  NeuralNetwork::Learning::BackPropagation prop;
+  NeuralNetwork::Learning::BackPropagation prop(n);
   for(int i=0;i<10000;i++) {
-    prop.teach(n,{1,0},{1});
-    prop.teach(n,{1,1},{0});
-    prop.teach(n,{0,0},{0});
-    prop.teach(n,{0,1},{1});
+    prop.teach({1,0},{1});
+    prop.teach({1,1},{0});
+    prop.teach({0,0},{0});
+    prop.teach({0,1},{1});
   }
 
   {
@@ -49,12 +49,12 @@ int main() {
 
   n.randomizeWeights();
 
-  NeuralNetwork::Learning::BackPropagation prop;
+  NeuralNetwork::Learning::BackPropagation prop(n);
   for(int i=0;i<10000;i++) {
-    prop.teach(n,{1,1},{1});
-    prop.teach(n,{0,0},{0});
-    prop.teach(n,{0,1},{0});
-    prop.teach(n,{1,0},{0});
+    prop.teach({1,1},{1});
+    prop.teach({0,0},{0});
+    prop.teach({0,1},{0});
+    prop.teach({1,0},{0});
   }
 
   {
@@ -85,12 +85,12 @@ int main() {
 
   n.randomizeWeights();
 
-  NeuralNetwork::Learning::BackPropagation prop;
+  NeuralNetwork::Learning::BackPropagation prop(n);
   for(int i=0;i<10000;i++) {
-    prop.teach(n,{1,1},{0});
-    prop.teach(n,{0,0},{1});
-    prop.teach(n,{0,1},{1});
-    prop.teach(n,{1,0},{1});
+    prop.teach({1,1},{0});
+    prop.teach({0,0},{1});
+    prop.teach({0,1},{1});
+    prop.teach({1,0},{1});
   }
 
   {
@@ -15,12 +15,12 @@ int main() {
 
   n.randomizeWeights();
 
-  NeuralNetwork::Learning::BackPropagation prop;
+  NeuralNetwork::Learning::BackPropagation prop(n);
   for(int i=0;i<100;i++) {
-    prop.teach(n,{1,0},{1});
-    prop.teach(n,{1,1},{0});
-    prop.teach(n,{0,0},{0});
-    prop.teach(n,{0,1},{1});
+    prop.teach({1,0},{1});
+    prop.teach({1,1},{0});
+    prop.teach({0,0},{0});
+    prop.teach({0,1},{1});
   }
  }
 }
@@ -13,12 +13,12 @@ int main() {
 
   n.randomizeWeights();
 
-  NeuralNetwork::Learning::OpticalBackPropagation prop;
+  NeuralNetwork::Learning::OpticalBackPropagation prop(n);
   for(int i=0;i<10000;i++) {
-    prop.teach(n,{1,0},{1});
-    prop.teach(n,{1,1},{0});
-    prop.teach(n,{0,0},{0});
-    prop.teach(n,{0,1},{1});
+    prop.teach({1,0},{1});
+    prop.teach({1,1},{0});
+    prop.teach({0,0},{0});
+    prop.teach({0,1},{1});
   }
 
   {
@@ -49,12 +49,12 @@ int main() {
 
   n.randomizeWeights();
 
-  NeuralNetwork::Learning::OpticalBackPropagation prop;
+  NeuralNetwork::Learning::OpticalBackPropagation prop(n);
   for(int i=0;i<10000;i++) {
-    prop.teach(n,{1,1},{1});
-    prop.teach(n,{0,0},{0});
-    prop.teach(n,{0,1},{0});
-    prop.teach(n,{1,0},{0});
+    prop.teach({1,1},{1});
+    prop.teach({0,0},{0});
+    prop.teach({0,1},{0});
+    prop.teach({1,0},{0});
   }
 
   {
@@ -85,12 +85,12 @@ int main() {
 
   n.randomizeWeights();
 
-  NeuralNetwork::Learning::OpticalBackPropagation prop;
+  NeuralNetwork::Learning::OpticalBackPropagation prop(n);
   for(int i=0;i<10000;i++) {
-    prop.teach(n,{1,1},{0});
-    prop.teach(n,{0,0},{1});
-    prop.teach(n,{0,1},{1});
-    prop.teach(n,{1,0},{1});
+    prop.teach({1,1},{0});
+    prop.teach({0,0},{1});
+    prop.teach({0,1},{1});
+    prop.teach({1,0},{1});
   }
 
   {
@@ -15,7 +15,6 @@ int main() {
 
   for(size_t i=0;i<solutions.size();i++) {
     float res= a.computeOutput({1,0.7})[0];
-    float resA=solutions[i];
     assert(res > solutions[i]*0.999 && res < solutions[i]*1.001);
   }
 }
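
The hunk above just drops an unused local (resA). The retained assert is a ±0.1% relative-tolerance check; note it assumes solutions[i] is positive, since both inequalities flip for a negative expected value. A direction-safe variant, offered only as a sketch:

    #include <cmath>

    // Relative-tolerance comparison that also works for negative expected values.
    inline bool withinRelTol(float actual, float expected, float tol = 0.001f) {
        return std::fabs(actual - expected) <= tol * std::fabs(expected);
    }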