Modified FeedForward to allow setting the activation function for a whole Layer, and added an XOR test for the FF network
This commit is contained in:
@@ -16,10 +16,10 @@ namespace FeedForward {
|
||||
class Layer : public Stringifiable {
|
||||
|
||||
public:
|
||||
Layer(std::size_t size = 0):neurons() {
|
||||
Layer(std::size_t size, const ActivationFunction::ActivationFunction &activationFunction):neurons() {
|
||||
neurons.push_back(new BiasNeuron);
|
||||
for(std::size_t i=0;i<size;i++) {
|
||||
neurons.push_back(new Neuron(neurons.size()));
|
||||
neurons.push_back(new Neuron(neurons.size(),activationFunction));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,8 @@
|
||||
#include <iomanip>
|
||||
#include <limits>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
namespace NeuralNetwork {
|
||||
namespace FeedForward {
|
||||
|
||||
@@ -29,24 +31,27 @@ namespace FeedForward {
|
||||
appendLayer(_inputSize);
|
||||
};
|
||||
|
||||
/**
 * @brief Append a layer holding `size` neurons (value-semantics version).
 *
 * When at least one layer already exists, the freshly added layer is wired
 * to the previous one by telling it how many outputs feed into it.
 *
 * @param size neuron count for the new layer (defaults to 1)
 * @return reference to the layer just appended
 */
Layer& appendLayer(std::size_t size = 1) {
    layers.push_back(Layer(size));

    const std::size_t count = layers.size();
    if (count > 1) {
        Layer &added = layers.back();
        added.setInputSize(layers[count - 2].size());
    }

    return layers.back();
}
|
||||
|
||||
/// @brief Unchecked random access to the id-th layer (value-semantics storage).
/// @param id zero-based layer index; no bounds checking is performed
/// @return mutable reference to the requested layer
Layer& operator[](const std::size_t &id) {
    Layer &selected = layers[id];
    return selected;
}
|
||||
|
||||
/**
|
||||
* @brief Virtual destructor for Network
|
||||
*/
|
||||
/**
 * @brief Virtual destructor for Network.
 *
 * The fused diff left the old empty destructor's closing `};` BEFORE the
 * added cleanup loop, stranding the `delete` loop outside any function.
 * Fixed: one destructor that releases every heap-allocated layer.
 * Virtual so deleting a derived network through a Network* is well defined.
 */
virtual ~Network() {
    for (auto &layer : layers) {
        delete layer;  // layers stores raw owning Layer* (see appendLayer)
    }
}
|
||||
|
||||
/**
 * @brief Append a layer of `size` neurons to the network (pointer storage).
 *
 * @param size               neuron count for the new layer (defaults to 1;
 *                           the Layer itself adds its bias neuron)
 * @param activationFunction activation applied to every neuron of the new
 *                           layer; defaults to Sigmoid(-4.9). The default
 *                           temporary lives for the duration of the call,
 *                           and Layer clones per neuron, so binding it to a
 *                           const reference is safe here.
 * @return reference to the freshly appended layer
 *
 * NOTE(review): ownership is a raw `new` stored in std::vector<Layer*> and
 * released in ~Network; std::unique_ptr<Layer> would make that explicit.
 */
Layer& appendLayer(std::size_t size = 1, const ActivationFunction::ActivationFunction &activationFunction = ActivationFunction::Sigmoid(-4.9)) {
    layers.push_back(new Layer(size, activationFunction));

    // Wire the new layer to its predecessor, if one exists.
    if (layers.size() > 1)
        layers.back()->setInputSize(layers[layers.size() - 2]->size());

    // Cleaned up: stale `*layers[layers.size()-1];//.back();` replaced by
    // the equivalent, clearer back() access.
    return *layers.back();
}
|
||||
|
||||
/// @brief Unchecked random access to the id-th layer (pointer storage).
/// @param id zero-based layer index; no bounds checking is performed
/// @return mutable reference to the layer the stored pointer designates
Layer& operator[](const std::size_t &id) {
    Layer *selected = layers[id];
    return *selected;
}
|
||||
|
||||
/**
|
||||
* @brief This is a function to compute one iterations of network
|
||||
@@ -66,7 +71,7 @@ namespace FeedForward {
|
||||
if(!first) {
|
||||
out << ",";
|
||||
}
|
||||
out << layer;
|
||||
out << *layer;
|
||||
first=false;
|
||||
}
|
||||
out << "]";
|
||||
@@ -74,7 +79,7 @@ namespace FeedForward {
|
||||
}
|
||||
|
||||
protected:
|
||||
std::vector<Layer> layers;
|
||||
std::vector<Layer*> layers;
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -92,9 +92,10 @@ namespace NeuralNetwork
|
||||
class Neuron: public NeuronInterface
|
||||
{
|
||||
public:
|
||||
Neuron(unsigned long _id=0): NeuronInterface(), basis(new BasisFunction::Linear),
|
||||
activation(new ActivationFunction::Sigmoid(-4.9)),
|
||||
id_(_id),weights(_id+1),_output(0),_value(0) {
|
||||
Neuron(unsigned long _id=0, const ActivationFunction::ActivationFunction &activationFunction=ActivationFunction::Sigmoid(-4.9)):
|
||||
NeuronInterface(), basis(new BasisFunction::Linear),
|
||||
activation(activationFunction.clone()),
|
||||
id_(_id),weights(_id+1),_output(0),_value(0) {
|
||||
}
|
||||
|
||||
Neuron(const Neuron &r): NeuronInterface(), basis(r.basis->clone()), activation(r.activation->clone()),id_(r.id_),
|
||||
|
||||
Reference in New Issue
Block a user