cleaning + Network getter and setter for input / output size

2016-05-03 22:03:03 +02:00
parent 6a17694a6b
commit 58f7f8f69b
9 changed files with 85 additions and 58 deletions

View File

@@ -4,14 +4,7 @@
 #include <SimpleJSON/Factory.h>
 #define NEURAL_NETWORK_REGISTER_ACTIVATION_FUNCTION(name,function) SIMPLEJSON_REGISTER(NeuralNetwork::ActivationFunction::Factory,name,function)
-/*public: \
-	static const class __FACT_REGISTER_ {\
-		public: \
-			__FACT_REGISTER_() {\
-				NeuralNetwork::ActivationFunction::Factory::registerCreator( #name ,function);\
-			}\
-	} __FACT_REGISTER;
-*/
 #define NEURAL_NETWORK_REGISTER_ACTIVATION_FUNCTION_FINISH(name,function) SIMPLEJSON_REGISTER_FINISH(NeuralNetwork::ActivationFunction::Factory,name,function)
 namespace NeuralNetwork {

View File

@@ -10,7 +10,7 @@ namespace NeuralNetwork {
 			 * @brief Constructor for Network
 			 * @param _inputSize is number of inputs to network
 			 */
-			Network(std::size_t inputSize, std::size_t outputSize) : NeuralNetwork::Network(), _inputSize(inputSize), _outputSize(outputSize) {
+			Network(std::size_t inputSize, std::size_t outputSize) : NeuralNetwork::Network(inputSize,outputSize) {
 				_neurons.push_back(std::make_shared<BiasNeuron>());
 				for(std::size_t i = 0; i < inputSize; i++) {
@@ -29,16 +29,16 @@ namespace NeuralNetwork {
 				compute[0] = 1.0;
-				for(std::size_t i = 1; i <= _inputSize; i++) {
+				for(std::size_t i = 1; i <= _inputs; i++) {
 					compute[i] = input[i - 1];
 				}
 				// 0 is bias, 1-_inputSize is input
-				for(std::size_t i = _inputSize + 1; i < _neurons.size(); i++) {
+				for(std::size_t i = _inputs + 1; i < _neurons.size(); i++) {
 					compute[i] = (*_neurons[i].get())(compute);
 				}
-				return std::vector<float>(compute.end() - _outputSize, compute.end());
+				return std::vector<float>(compute.end() - _outputs, compute.end());
 			}
 			std::size_t getNeuronSize() const {
@@ -52,16 +52,16 @@ namespace NeuralNetwork {
 			std::shared_ptr<NeuronInterface> addNeuron() {
 				_neurons.push_back(std::make_shared<Neuron>());
 				auto neuron = _neurons.back();
-				neuron->setInputSize(_neurons.size() - _outputSize);
+				neuron->setInputSize(_neurons.size() - _outputs);
 				// 0 is bias, 1-_inputSize is input
 				std::size_t maxIndexOfNeuron = _neurons.size() - 1;
 				// move output to right position
-				for(std::size_t i = 0; i < _outputSize; i++) {
+				for(std::size_t i = 0; i < _outputs; i++) {
 					std::swap(_neurons[maxIndexOfNeuron - i], _neurons[maxIndexOfNeuron - i - 1]);
 				}
-				for(std::size_t i = 0; i < _outputSize; i++) {
-					_neurons[maxIndexOfNeuron - i]->setInputSize(_neurons.size() - _outputSize);
+				for(std::size_t i = 0; i < _outputs; i++) {
+					_neurons[maxIndexOfNeuron - i]->setInputSize(_neurons.size() - _outputs);
 				}
 				return neuron;
 			}
@@ -74,8 +74,8 @@ namespace NeuralNetwork {
 				return {
 					{"class", "NeuralNetwork::Recurrent::Network"},
-					{"inputSize", _inputSize},
-					{"outputSize", _outputSize},
+					{"inputSize", _inputs},
+					{"outputSize", _outputs},
 					{"neurons", neuronsSerialized}
 				};
 			}
@@ -94,9 +94,16 @@ namespace NeuralNetwork {
 			}
+			//I I H H O O 6
+			void randomizeWeights() {
+				for(std::size_t neuron = _neurons.size() - _outputs; neuron < _neurons.size(); neuron++) {
+					for(std::size_t weight = 0; weight < _neurons.size() - _outputs; weight++) {
+						_neurons[neuron]->weight(weight) = 1.0 - static_cast<float>(rand() % 2001) / 1000.0;
+					}
+				}
+			}
 		protected:
-			std::size_t _inputSize;
-			std::size_t _outputSize;
 			std::vector<std::shared_ptr<NeuronInterface>> _neurons = {};
 SIMPLEJSON_REGISTER(NeuralNetwork::Cascade::Network::Factory, NeuralNetwork::Cascade::Network, deserialize)
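
For reference, the randomizeWeights() initializer added above draws each weight from roughly [-1, 1]: rand() % 2001 yields an integer in 0..2000, so 1.0 - value/1000.0 ranges from 1.0 down to -1.0. A minimal standalone sketch of the same formula (the helper name is illustrative, not part of the library):

#include <cstddef>
#include <cstdlib>
#include <vector>

// Illustrative sketch of the initialization formula used in randomizeWeights():
// std::rand() % 2001 is in 0..2000, so each weight lands in [-1.0, 1.0].
std::vector<float> makeRandomWeights(std::size_t count) {
	std::vector<float> weights(count);
	for (auto &w : weights) {
		w = 1.0f - static_cast<float>(std::rand() % 2001) / 1000.0f;
	}
	return weights;
}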

View File

@@ -20,7 +20,7 @@ namespace FeedForward {
 			 * @brief Constructor for Network
 			 * @param _inputSize is number of inputs to network
 			 */
-			inline Network(size_t _inputSize):NeuralNetwork::Network(),layers() {
+			inline Network(size_t _inputSize):NeuralNetwork::Network(_inputSize,_inputSize),layers() {
 				appendLayer(_inputSize);
 			};
@@ -36,8 +36,13 @@ namespace FeedForward {
 			Layer& appendLayer(std::size_t size=1, const ActivationFunction::ActivationFunction &activationFunction=ActivationFunction::Sigmoid(-4.9)) {
 				layers.push_back(new Layer(size,activationFunction));
-				if(layers.size() > 1)
+				if(layers.size() > 1) {
 					layers.back()->setInputSize(layers[layers.size() - 2]->size());
+				} else {
+					_inputs=size;
+				}
+				_outputs=size;
 				return *layers[layers.size()-1];//.back();
 			}
@@ -77,7 +82,7 @@ namespace FeedForward {
 			std::vector<Layer*> layers;
 		private:
-			inline Network():NeuralNetwork::Network(),layers() {
+			inline Network():NeuralNetwork::Network(0,0),layers() {
 			};
 SIMPLEJSON_REGISTER(NeuralNetwork::FeedForward::Network::Factory, NeuralNetwork::FeedForward::Network,NeuralNetwork::FeedForward::Network::deserialize)
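
With this change, appendLayer() keeps the inherited _inputs/_outputs members in sync: the first layer fixes the input size, and every appended layer becomes the new output size. A hedged usage sketch of that behaviour (the include path is an assumption, not taken from this commit):

#include <cstddef>
// Assumed header location; adjust to the actual project layout.
#include <NeuralNetwork/FeedForwardNetwork/Network.h>

int main() {
	NeuralNetwork::FeedForward::Network net(3);  // constructor appends the input layer
	net.appendLayer(5);                          // hidden layer
	net.appendLayer(2);                          // last layer defines the output size

	// Per the appendLayer() logic above, the base-class getters now report:
	std::size_t in  = net.inputs();   // 3
	std::size_t out = net.outputs();  // 2
	return static_cast<int>(in + out);
}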

View File

@@ -9,23 +9,23 @@
 #define NEURAL_NETWORK_INIT() const static bool ______TMP= NeuralNetwork::Network::loaded()
-namespace NeuralNetwork
-{
+namespace NeuralNetwork {
 	/**
 	 * @author Tomas Cernik (Tom.Cernik@gmail.com)
 	 * @brief Abstract model of simple Network
 	 */
-	class Network : public SimpleJSON::SerializableObject
-	{
+	class Network : public SimpleJSON::SerializableObject {
 		public:
 			/**
 			 * @brief Constructor for Network
 			 */
-			inline Network() {
+			inline Network(std::size_t inputs, std::size_t outputs) : _inputs(inputs), _outputs(outputs) {
 				loaded();
 			};
+			Network(const Network &r) = default;
 			/**
 			 * @brief Virtual destructor for Network
 			 */
@@ -38,19 +38,32 @@ namespace NeuralNetwork
 			 */
 			virtual std::vector<float> computeOutput(const std::vector<float> &input) = 0;
+			std::size_t inputs() {
+				return _inputs;
+			}
+			std::size_t outputs() {
+				return _outputs;
+			}
 			/**
-			 * @param t is number of threads, if set to 0 or 1 then threading is disabled
+			 * @param threads is number of threads, if set to 0 or 1 then threading is disabled
 			 * @brief Enables or disables Threaded computing of ANN
 			 */
-			inline virtual void setThreads(const unsigned& t) final {threads=t;}
+			inline virtual void setThreads(const unsigned &threads) final {
+				_threads = threads;
+			}
 		protected:
 			/**
 			 * @brief Number of threads used by network
 			 */
-			unsigned threads=1;
+			unsigned _threads = 1;
+			std::size_t _inputs;
+			std::size_t _outputs;
 		public:
 			static bool loaded();
 	};
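
The base class now owns the input/output sizes and the thread count, so code that previously reached for per-subclass _inputSize/_outputSize members can query any network generically. A minimal sketch against the interface above (the describe() helper and the include path are illustrative assumptions):

#include <cstddef>
// Assumed header location; adjust to the actual project layout.
#include <NeuralNetwork/Network.h>

// Works with any concrete subclass, since sizes and threading now live in the base class.
void describe(NeuralNetwork::Network &net) {
	std::size_t in  = net.inputs();   // value passed to NeuralNetwork::Network(inputs, outputs)
	std::size_t out = net.outputs();
	net.setThreads(4);                // stored in the protected _threads member
	(void)in;
	(void)out;
}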

View File

@@ -31,6 +31,14 @@ namespace NeuralNetwork
 			 */
 			virtual ~NeuronInterface() {};
+			const std::vector<float> & getWeights() const {
+				return weights;
+			}
+			void setWeights(const std::vector<float> &weights_) {
+				weights=weights_;
+			}
 			/**
 			 * @brief getter for neuron weight
 			 * @param &neuron is neuron it's weight is returned
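
The new accessors make it possible to copy or persist a neuron's whole weight vector without going through the per-index weight getter. A small hedged sketch (the copyWeights() helper is illustrative; any NeuronInterface implementation applies):

#include <vector>

// Copy all weights from one neuron to another via the new accessors.
void copyWeights(const NeuralNetwork::NeuronInterface &source,
                 NeuralNetwork::NeuronInterface &target) {
	const std::vector<float> &weights = source.getWeights();
	target.setWeights(weights);
}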

View File

@@ -24,14 +24,14 @@ namespace Recurrent {
 			 * @param _outputSize is size of output from network
 			 * @param hiddenUnits is number of hiddenUnits to be created
 			 */
-			inline Network(size_t _inputSize, size_t _outputSize,size_t hiddenUnits=0):NeuralNetwork::Network(),inputSize(_inputSize),outputSize(_outputSize), neurons(0),outputs(0) {
+			inline Network(size_t inputSize, size_t outputSize,size_t hiddenUnits=0):NeuralNetwork::Network(inputSize,outputSize), neurons(0),outputs(0) {
 				neurons.push_back(new NeuralNetwork::BiasNeuron());
-				for(size_t i=0;i<_inputSize;i++) {
+				for(size_t i=0;i<inputSize;i++) {
 					neurons.push_back(new NeuralNetwork::InputNeuron(neurons.size()));
 				}
-				for(size_t i=0;i<_outputSize;i++) {
+				for(size_t i=0;i<outputSize;i++) {
 					addNeuron();
 				}
@@ -40,7 +40,7 @@ namespace Recurrent {
 				}
 			};
-			Network(const Network &r) :inputSize(r.inputSize), outputSize(r.outputSize), neurons(0), outputs(r.outputs) {
+			Network(const Network &r) : NeuralNetwork::Network(r), neurons(0), outputs(r.outputs) {
 				neurons.push_back(new NeuralNetwork::BiasNeuron());
 				for(std::size_t i=1;i<r.neurons.size();i++) {
 					neurons.push_back(r.neurons[i]->clone());
@@ -109,8 +109,6 @@ namespace Recurrent {
 			typedef SimpleJSON::Factory<Network> Factory;
 		protected:
-			size_t inputSize=0;
-			size_t outputSize=0;
 			std::vector<NeuronInterface*> neurons;
 			std::vector<float> outputs;

View File

@@ -1,12 +1,12 @@
 #include <NeuralNetwork/BasisFunction/Linear.h>
 float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float> &weights, const std::vector<float> &input) const {
-	assert(input.size()== weights.size());
-	std::size_t inputSize=input.size();
+	assert(input.size() >= weights.size());
+	std::size_t weightsSize=weights.size();
 #ifdef USE_AVX
-	std::size_t alignedPrev=inputSize-inputSize%8;
+	std::size_t alignedPrev=weightsSize-weightsSize%8;
 	const float* weightsData=weights.data();
 	const float* inputData=input.data();
@@ -32,7 +32,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>
 #endif
 		}
-		for(size_t k=alignedPrev;k<inputSize;k++) {
+		for(size_t k=alignedPrev;k<weightsSize;k++) {
 #ifdef USE_FMA
 			partialSolution.avx=_mm256_fmadd_ps(_mm256_set_ps(weightsData[k],0,0,0,0,0,0,0),_mm256_set_ps(inputData[k],0,0,0,0,0,0,0),partialSolution.avx);
 #else
@@ -49,7 +49,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>
 #elif USE_SSE
-	std::size_t alignedPrev=inputSize-inputSize%4;
+	std::size_t alignedPrev=weightsSize-weightsSize%4;
 	const float* weightsData=weights.data();
 	const float* inputData=input.data();
@@ -61,7 +61,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>
 		partialSolution.sse=_mm_add_ps(partialSolution.sse,_mm_mul_ps(_mm_load_ps(weightsData+k),_mm_load_ps(inputData+k)));
 	}
-	for(register size_t k=alignedPrev;k<inputSize;k++) {
+	for(register size_t k=alignedPrev;k<weightsSize;k++) {
 		partialSolution.sse=_mm_add_ps(partialSolution.sse,_mm_mul_ps(_mm_load_ss(weightsData+k),_mm_load_ss(inputData+k)));
 	}
@@ -76,7 +76,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>
 #else
 	register float tmp = 0;
-	for(size_t k=0;k<inputSize;k++) {
+	for(size_t k=0;k<weightsSize;k++) {
 		tmp+=input[k]*weights[k];
 	}
 	return tmp;
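
The change above switches the loop bound (and the assert) from input.size() to weights.size(), so the input vector may now be longer than the weight vector and only the first weights.size() entries are used. A plain scalar reference of the resulting behaviour, without the AVX/SSE paths (the function name is illustrative):

#include <cassert>
#include <cstddef>
#include <vector>

// Scalar equivalent of the updated Linear basis function: a dot product over
// weights.size() terms; input only has to be at least that long.
float linearBasisReference(const std::vector<float> &weights, const std::vector<float> &input) {
	assert(input.size() >= weights.size());
	float sum = 0.0f;
	for (std::size_t k = 0; k < weights.size(); ++k) {
		sum += input[k] * weights[k];
	}
	return sum;
}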

View File

@@ -39,11 +39,16 @@ std::unique_ptr<NeuralNetwork::FeedForward::Network> NeuralNetwork::FeedForward:
 	for(auto layers:network->layers) {
 		delete layers;
 	}
 	network->layers.clear();
 	for(auto& layerObject: obj["layers"].as<SimpleJSON::Type::Array>()) {
 		network->layers.push_back(NeuralNetwork::FeedForward::Layer::Factory::deserialize(layerObject.as<SimpleJSON::Type::Object>()).release());
 	}
+	network->_inputs=network->layers[0]->size()-1;
+	network->_outputs=network->layers.back()->size()-1;
 	return std::unique_ptr<Network>(network);
 }

View File

@@ -5,7 +5,7 @@ SIMPLEJSON_REGISTER_FINISH(NeuralNetwork::Recurrent::Network::Factory, NeuralNet
 std::vector<float> NeuralNetwork::Recurrent::Network::computeOutput(const std::vector<float>& input, unsigned int iterations) {
-	assert(input.size() == inputSize);
+	assert(input.size() == _inputs);
 	if(outputs.size() != neurons.size()) {
 		outputs.resize(neurons.size());
@@ -16,7 +16,7 @@ std::vector<float> NeuralNetwork::Recurrent::Network::computeOutput(const std::v
 	std::vector<float> newOutputs(neurons.size());
-	for(size_t i=0;i<inputSize;i++) {
+	for(size_t i=0;i<_inputs;i++) {
 		outputs[i+1]=input[i];
 		newOutputs[i+1]=input[i];
 	}
@@ -26,15 +26,15 @@ std::vector<float> NeuralNetwork::Recurrent::Network::computeOutput(const std::v
 	std::size_t neuronsSize = neurons.size();
 	for(unsigned int iter=0;iter< iterations;iter++) {
-		for(size_t i=inputSize+1;i<neuronsSize;i++) {
+		for(size_t i=_inputs+1;i<neuronsSize;i++) {
 			newOutputs[i] = neurons[i]->operator()(outputs);
 		}
 		outputs.swap(newOutputs);
 	}
 	std::vector<float> ret;
-	for(size_t i=0;i<outputSize;i++) {
-		ret.push_back(neurons[i+inputSize+1]->output());
+	for(size_t i=0;i<_outputs;i++) {
+		ret.push_back(neurons[i+_inputs+1]->output());
 	}
 	return ret;
@@ -45,9 +45,7 @@ NeuralNetwork::Recurrent::Network NeuralNetwork::Recurrent::Network::connectWith
 }
 NeuralNetwork::Recurrent::Network& NeuralNetwork::Recurrent::Network::operator=(const NeuralNetwork::Recurrent::Network&r) {
-	inputSize=r.inputSize;
-	outputSize=r.outputSize;
+	NeuralNetwork::Network::operator=(r);
 	outputs=r.outputs;
 	for(std::size_t i=1;i<neurons.size();i++) {
 		delete neurons[i];
@@ -68,8 +66,8 @@ SimpleJSON::Type::Object NeuralNetwork::Recurrent::Network::serialize() const {
 	}
 	return {
 		{"class", "NeuralNetwork::Recurrent::Network"},
-		{"inputSize", inputSize},
-		{"outputSize", outputSize},
+		{"inputSize", _inputs},
+		{"outputSize", _outputs},
 		{"outputs", outputs},
 		{"neurons", neuronsSerialized}
 	};
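
Taken together, the Recurrent network now validates and indexes everything through the shared _inputs/_outputs members. A hedged usage sketch (the include path is assumed, and the literal sizes are only for illustration):

#include <vector>
// Assumed header location; adjust to the actual project layout.
#include <NeuralNetwork/RecurrentNetwork/Network.h>

int main() {
	// 2 inputs, 1 output, 3 hidden units, per the constructor shown above.
	NeuralNetwork::Recurrent::Network net(2, 1, 3);

	// computeOutput asserts input.size() == _inputs and returns _outputs values.
	std::vector<float> out = net.computeOutput({0.5f, -0.25f}, 10 /* iterations */);
	return out.size() == net.outputs() ? 0 : 1;
}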