Modified the BackPropagation interface: the correction function is now a pluggable CorrectionFunction strategy object (Linear, ArcTangent, Optical) instead of a hard-coded virtual method

This commit is contained in:
2016-02-24 19:05:26 +01:00
parent 47de0fa08b
commit c45f12f53c
10 changed files with 195 additions and 13 deletions

View File

@@ -2,7 +2,9 @@
#include <vector>
#include <cmath>
#include <NeuralNetwork/FeedForward/Network.h>
#include "CorrectionFunction/Linear.h"
namespace NeuralNetwork {
namespace Learning {
@@ -13,21 +15,23 @@ namespace Learning {
class BackPropagation {
public:
inline BackPropagation(FeedForward::Network &feedForwardNetwork): network(feedForwardNetwork), learningCoefficient(0.4), deltas() {
inline BackPropagation(FeedForward::Network &feedForwardNetwork, CorrectionFunction::CorrectionFunction *correction = new CorrectionFunction::Linear()):
network(feedForwardNetwork), correctionFunction(correction),learningCoefficient(0.4), deltas() {
resize();
}
virtual ~BackPropagation() {
delete correctionFunction;
}
BackPropagation(const BackPropagation&)=delete;
BackPropagation& operator=(const NeuralNetwork::Learning::BackPropagation&) = delete;
void teach(const std::vector<float> &input, const std::vector<float> &output);
inline virtual void setLearningCoefficient (const float& coefficient) { learningCoefficient=coefficient; }
protected:
inline virtual float correction(const float & expected, const float &computed) const {
return expected-computed;
};
inline void resize() {
if(deltas.size()!=network.size())
@@ -41,6 +45,8 @@ namespace Learning {
FeedForward::Network &network;
CorrectionFunction::CorrectionFunction *correctionFunction;
float learningCoefficient;
std::vector<std::vector<float>> deltas;

View File

@@ -0,0 +1,29 @@
#pragma once

#include "CorrectionFunction.h"

#include <cmath>

namespace NeuralNetwork {
namespace Learning {
namespace CorrectionFunction {

/**
 * @brief Correction function that squashes the raw error through arctangent.
 *
 * The correction is atan(coefficient * (expected - computed)): roughly linear
 * for small errors, bounded for large ones.
 */
class ArcTangent : public CorrectionFunction {
public:
	/**
	 * @brief Constructs the function with a scaling coefficient.
	 * @param c scale applied to the raw error before atan (default 1.0)
	 */
	ArcTangent (const float &c=1.0): coefficient(c) {
	}

	/**
	 * @brief Returns the error (correction) for the given values.
	 * @param expected target output value
	 * @param computed value actually produced by the network
	 * @return atan(coefficient * (expected - computed))
	 */
	inline virtual float operator()(const float &expected, const float &computed) const override final {
		// std::atan from <cmath>: the original relied on an unqualified atan
		// with no <cmath> include (only <iostream>, used by a removed debug print).
		return std::atan(coefficient*(expected-computed));
	}

private:
	const float coefficient; // error scaling factor, fixed at construction
};

}
}
}

View File

@@ -0,0 +1,19 @@
#pragma once

namespace NeuralNetwork {
namespace Learning {
namespace CorrectionFunction {

/**
 * @brief Abstract interface for error-correction functions.
 *
 * Implementations map an (expected, computed) output pair to the error
 * signal used by the learning algorithm.
 */
class CorrectionFunction {
public:
	/// Virtual destructor so implementations may be deleted through a base pointer.
	virtual ~CorrectionFunction() = default;

	/**
	 * @brief Returns the error for the given pair of values.
	 * @param expected target output value
	 * @param computed value actually produced by the network
	 */
	virtual float operator()(const float &expected, const float &computed) const = 0;
};

}
}
}

View File

@@ -0,0 +1,20 @@
#pragma once

#include "CorrectionFunction.h"

namespace NeuralNetwork {
namespace Learning {
namespace CorrectionFunction {

/**
 * @brief Identity correction: the error is the plain difference
 *        between the expected and the computed value.
 */
class Linear : public CorrectionFunction {
public:
	/**
	 * @brief Returns the error for the given values.
	 * @param expected target output value
	 * @param computed value actually produced by the network
	 * @return expected - computed
	 */
	inline virtual float operator()(const float &expected, const float &computed) const override final {
		const float difference = expected - computed;
		return difference;
	}
};

}
}
}

View File

@@ -0,0 +1,22 @@
#pragma once

#include "CorrectionFunction.h"

#include <cmath>

namespace NeuralNetwork {
namespace Learning {
namespace CorrectionFunction {

/**
 * @brief "Optical" correction: exponentially amplified error.
 *
 * Returns sign-adjusted 1 + exp((expected - computed)^2), i.e. the magnitude
 * is always greater than 2 and grows exponentially with the error.
 * NOTE(review): a zero difference yields +2, not 0 — kept as-is to match the
 * original implementation; confirm this is the intended formula.
 */
class Optical : public CorrectionFunction {
public:
	/**
	 * @brief Returns the error for the given values.
	 * @param expected target output value
	 * @param computed value actually produced by the network
	 */
	inline virtual float operator()(const float &expected, const float &computed) const override final {
		// 'register' dropped: deprecated since C++11 and removed in C++17.
		// std::exp qualified and <cmath> included (the original relied on a
		// transitive, unqualified exp).
		const float difference = expected - computed;
		const float magnitude = 1 + std::exp(difference * difference);
		return difference < 0 ? -magnitude : magnitude;
	}
};

}
}
}

View File

@@ -1,6 +1,7 @@
#pragma once
#include "./BackPropagation.h"
#include "./CorrectionFunction/Optical.h"
namespace NeuralNetwork {
namespace Learning {
@@ -11,18 +12,12 @@ namespace Learning {
class OpticalBackPropagation : public BackPropagation {
public:
OpticalBackPropagation(FeedForward::Network &feedForwardNetwork): BackPropagation(feedForwardNetwork) {
OpticalBackPropagation(FeedForward::Network &feedForwardNetwork): BackPropagation(feedForwardNetwork,new CorrectionFunction::Optical()) {
}
virtual ~OpticalBackPropagation() {
}
protected:
inline virtual float correction(const float & expected, const float &computed) const override {
register float tmp=(expected-computed);
register float ret=1+exp(tmp*tmp);
return tmp < 0? -ret:ret;
};
};
}
}