cleaning + Network getter and setter for input / output size
@@ -4,14 +4,7 @@
 #include <SimpleJSON/Factory.h>

 #define NEURAL_NETWORK_REGISTER_ACTIVATION_FUNCTION(name,function) SIMPLEJSON_REGISTER(NeuralNetwork::ActivationFunction::Factory,name,function)
-/*public: \
-static const class __FACT_REGISTER_ {\
-public: \
-__FACT_REGISTER_() {\
-NeuralNetwork::ActivationFunction::Factory::registerCreator( #name ,function);\
-}\
-} __FACT_REGISTER;
-*/

 #define NEURAL_NETWORK_REGISTER_ACTIVATION_FUNCTION_FINISH(name,function) SIMPLEJSON_REGISTER_FINISH(NeuralNetwork::ActivationFunction::Factory,name,function)

 namespace NeuralNetwork {

@@ -10,7 +10,7 @@ namespace NeuralNetwork {
 * @brief Constructor for Network
 * @param _inputSize is number of inputs to network
 */
-Network(std::size_t inputSize, std::size_t outputSize) : NeuralNetwork::Network(), _inputSize(inputSize), _outputSize(outputSize) {
+Network(std::size_t inputSize, std::size_t outputSize) : NeuralNetwork::Network(inputSize,outputSize) {
 _neurons.push_back(std::make_shared<BiasNeuron>());

 for(std::size_t i = 0; i < inputSize; i++) {

@@ -29,16 +29,16 @@

 compute[0] = 1.0;

-for(std::size_t i = 1; i <= _inputSize; i++) {
+for(std::size_t i = 1; i <= _inputs; i++) {
 compute[i] = input[i - 1];
 }

 // 0 is bias, 1-_inputSize is input
-for(std::size_t i = _inputSize + 1; i < _neurons.size(); i++) {
+for(std::size_t i = _inputs + 1; i < _neurons.size(); i++) {
 compute[i] = (*_neurons[i].get())(compute);
 }

-return std::vector<float>(compute.end() - _outputSize, compute.end());
+return std::vector<float>(compute.end() - _outputs, compute.end());
 }

 std::size_t getNeuronSize() const {

@@ -52,16 +52,16 @@ namespace NeuralNetwork {
 std::shared_ptr<NeuronInterface> addNeuron() {
 _neurons.push_back(std::make_shared<Neuron>());
 auto neuron = _neurons.back();
-neuron->setInputSize(_neurons.size() - _outputSize);
+neuron->setInputSize(_neurons.size() - _outputs);
 // 0 is bias, 1-_inputSize is input
 std::size_t maxIndexOfNeuron = _neurons.size() - 1;
 // move output to right position
-for(std::size_t i = 0; i < _outputSize; i++) {
+for(std::size_t i = 0; i < _outputs; i++) {
 std::swap(_neurons[maxIndexOfNeuron - i], _neurons[maxIndexOfNeuron - i - 1]);
 }

-for(std::size_t i = 0; i < _outputSize; i++) {
-_neurons[maxIndexOfNeuron - i]->setInputSize(_neurons.size() - _outputSize);
+for(std::size_t i = 0; i < _outputs; i++) {
+_neurons[maxIndexOfNeuron - i]->setInputSize(_neurons.size() - _outputs);
 }
 return neuron;
 }

@@ -74,8 +74,8 @@ namespace NeuralNetwork {

 return {
 {"class", "NeuralNetwork::Recurrent::Network"},
-{"inputSize", _inputSize},
-{"outputSize", _outputSize},
+{"inputSize", _inputs},
+{"outputSize", _outputs},
 {"neurons", neuronsSerialized}
 };
 }

@@ -86,7 +86,7 @@ namespace NeuralNetwork {
 Network *net = new Network(inputSize, outputSize);
 net->_neurons.clear();

-for(const auto& neuronObj: obj["neurons"].as<SimpleJSON::Type::Array>()) {
+for(const auto &neuronObj: obj["neurons"].as<SimpleJSON::Type::Array>()) {
 net->_neurons.push_back(Neuron::Factory::deserialize(neuronObj.as<SimpleJSON::Type::Object>()));
 }

@@ -94,9 +94,16 @@ namespace NeuralNetwork {
 }


+//I I H H O O 6
+void randomizeWeights() {
+for(std::size_t neuron = _neurons.size() - _outputs; neuron < _neurons.size(); neuron++) {
+for(std::size_t weight = 0; weight < _neurons.size() - _outputs; weight++) {
+_neurons[neuron]->weight(weight) = 1.0 - static_cast<float>(rand() % 2001) / 1000.0;
+}
+}
+}

 protected:
-std::size_t _inputSize;
-std::size_t _outputSize;
 std::vector<std::shared_ptr<NeuronInterface>> _neurons = {};

 SIMPLEJSON_REGISTER(NeuralNetwork::Cascade::Network::Factory, NeuralNetwork::Cascade::Network, deserialize)
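A note on the new randomizeWeights() above: rand() % 2001 yields an integer in [0, 2000], dividing by 1000.0 maps it to [0.0, 2.0], and subtracting from 1.0 lands each weight in [-1.0, 1.0]. A minimal standalone sketch of just that expression (the helper name is made up for illustration, not part of the commit):

    #include <cstdlib>

    // Draws a value in [-1.0, 1.0], mirroring the initialisation used above.
    float randomWeight() {
        return 1.0f - static_cast<float>(std::rand() % 2001) / 1000.0f;
    }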
@@ -20,7 +20,7 @@ namespace FeedForward {
 * @brief Constructor for Network
 * @param _inputSize is number of inputs to network
 */
-inline Network(size_t _inputSize):NeuralNetwork::Network(),layers() {
+inline Network(size_t _inputSize):NeuralNetwork::Network(_inputSize,_inputSize),layers() {
 appendLayer(_inputSize);
 };

@@ -36,8 +36,13 @@ namespace FeedForward {
 Layer& appendLayer(std::size_t size=1, const ActivationFunction::ActivationFunction &activationFunction=ActivationFunction::Sigmoid(-4.9)) {
 layers.push_back(new Layer(size,activationFunction));

-if(layers.size() > 1)
-layers.back()->setInputSize(layers[layers.size()-2]->size());
+if(layers.size() > 1) {
+layers.back()->setInputSize(layers[layers.size() - 2]->size());
+} else {
+_inputs=size;
+}
+
+_outputs=size;

 return *layers[layers.size()-1];//.back();
 }
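A note on the appendLayer() bookkeeping above: the first layer ever appended fixes _inputs, and every append overwrites _outputs, so the most recently appended layer determines the reported output size. An assumed usage sketch, shown as comments (the layer sizes 4 and 1 are arbitrary, not from the commit):

    // NeuralNetwork::FeedForward::Network net(2); // ctor forwards (2,2) to the base
    //                                             // and calls appendLayer(2), so _inputs = 2
    // net.appendLayer(4);                         // hidden layer: _outputs becomes 4
    // net.appendLayer(1);                         // output layer: _outputs becomes 1
    // net.inputs();                               // 2
    // net.outputs();                              // 1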
@@ -77,7 +82,7 @@ namespace FeedForward {
 std::vector<Layer*> layers;

 private:
-inline Network():NeuralNetwork::Network(),layers() {
+inline Network():NeuralNetwork::Network(0,0),layers() {
 };

 SIMPLEJSON_REGISTER(NeuralNetwork::FeedForward::Network::Factory, NeuralNetwork::FeedForward::Network,NeuralNetwork::FeedForward::Network::deserialize)

@@ -9,48 +9,61 @@

 #define NEURAL_NETWORK_INIT() const static bool ______TMP= NeuralNetwork::Network::loaded()

-namespace NeuralNetwork
-{
+namespace NeuralNetwork {

 /**
 * @author Tomas Cernik (Tom.Cernik@gmail.com)
 * @brief Abstract model of simple Network
 */
-class Network : public SimpleJSON::SerializableObject
-{
+class Network : public SimpleJSON::SerializableObject {
 public:
 /**
 * @brief Constructor for Network
 */
-inline Network() {
+inline Network(std::size_t inputs, std::size_t outputs) : _inputs(inputs), _outputs(outputs) {
 loaded();
 };

+Network(const Network &r) = default;

 /**
 * @brief Virtual destructor for Network
 */
-virtual ~Network() {};
+virtual ~Network() { };

 /**
 * @brief This is a virtual function for all networks
 * @param input is input of network
 * @returns output of network
 */
-virtual std::vector<float> computeOutput(const std::vector<float>& input)=0;
+virtual std::vector<float> computeOutput(const std::vector<float> &input) = 0;
+
+std::size_t inputs() {
+return _inputs;
+}
+
+std::size_t outputs() {
+return _outputs;
+}

 /**
-* @param t is number of threads, if set to 0 or 1 then threading is disabled
+* @param threads is number of threads, if set to 0 or 1 then threading is disabled
 * @brief Enables or disables Threaded computing of ANN
 */

-inline virtual void setThreads(const unsigned& t) final {threads=t;}
+inline virtual void setThreads(const unsigned &threads) final {
+_threads = threads;
+}

 protected:
 /**
 * @brief Number of threads used by network
 */
-unsigned threads=1;
+unsigned _threads = 1;

+std::size_t _inputs;
+std::size_t _outputs;
 public:
 static bool loaded();
 };

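For orientation, a minimal sketch of how a subclass is expected to use the reworked base class: the sizes go to the NeuralNetwork::Network constructor and are read back through the new inputs() and outputs() getters. The class name, header path and usage lines are assumptions for illustration, not part of the commit; depending on SimpleJSON::SerializableObject, a serialize() override may also be required and is omitted here.

    #include <cstddef>
    #include <vector>
    #include <NeuralNetwork/Network.h>   // assumed header location of the base class

    class IdentityNetwork : public NeuralNetwork::Network {
    public:
        // Sizes are handed to the base class instead of being stored in the subclass.
        explicit IdentityNetwork(std::size_t size) : NeuralNetwork::Network(size, size) {}

        // The pure virtual interface is unchanged; this toy just echoes its input.
        std::vector<float> computeOutput(const std::vector<float> &input) override {
            return input;
        }
    };

    // Assumed usage:
    //   IdentityNetwork net(3);
    //   net.inputs();       // 3, via the new getter
    //   net.outputs();      // 3
    //   net.setThreads(4);  // stored in the protected _threads member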
@@ -31,6 +31,14 @@ namespace NeuralNetwork
 */
 virtual ~NeuronInterface() {};

+const std::vector<float> & getWeights() const {
+return weights;
+}
+
+void setWeights(const std::vector<float> &weights_) {
+weights=weights_;
+}
+
 /**
 * @brief getter for neuron weight
 * @param &neuron is neuron it's weight is returned

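The new getWeights() and setWeights() accessors let callers read and replace a neuron's whole weight vector. An assumed usage sketch, shown as comments (not part of the commit):

    // std::shared_ptr<NeuronInterface> n = net.addNeuron();
    // std::vector<float> w = n->getWeights();  // const reference, copied here
    // w[0] = 0.5f;
    // n->setWeights(w);                        // write the whole vector back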
@@ -24,14 +24,14 @@ namespace Recurrent {
 * @param _outputSize is size of output from network
 * @param hiddenUnits is number of hiddenUnits to be created
 */
-inline Network(size_t _inputSize, size_t _outputSize,size_t hiddenUnits=0):NeuralNetwork::Network(),inputSize(_inputSize),outputSize(_outputSize), neurons(0),outputs(0) {
+inline Network(size_t inputSize, size_t outputSize,size_t hiddenUnits=0):NeuralNetwork::Network(inputSize,outputSize), neurons(0),outputs(0) {
 neurons.push_back(new NeuralNetwork::BiasNeuron());

-for(size_t i=0;i<_inputSize;i++) {
+for(size_t i=0;i<inputSize;i++) {
 neurons.push_back(new NeuralNetwork::InputNeuron(neurons.size()));
 }

-for(size_t i=0;i<_outputSize;i++) {
+for(size_t i=0;i<outputSize;i++) {
 addNeuron();
 }

@@ -40,7 +40,7 @@ namespace Recurrent {
 }
 };

-Network(const Network &r) :inputSize(r.inputSize), outputSize(r.outputSize), neurons(0), outputs(r.outputs) {
+Network(const Network &r) : NeuralNetwork::Network(r), neurons(0), outputs(r.outputs) {
 neurons.push_back(new NeuralNetwork::BiasNeuron());
 for(std::size_t i=1;i<r.neurons.size();i++) {
 neurons.push_back(r.neurons[i]->clone());

@@ -109,8 +109,6 @@ namespace Recurrent {

 typedef SimpleJSON::Factory<Network> Factory;
 protected:
-size_t inputSize=0;
-size_t outputSize=0;

 std::vector<NeuronInterface*> neurons;
 std::vector<float> outputs;

@@ -1,12 +1,12 @@
 #include <NeuralNetwork/BasisFunction/Linear.h>

 float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float> &weights, const std::vector<float> &input) const {
-assert(input.size()== weights.size());
-std::size_t inputSize=input.size();
+assert(input.size() >= weights.size());
+std::size_t weightsSize=weights.size();

 #ifdef USE_AVX

-std::size_t alignedPrev=inputSize-inputSize%8;
+std::size_t alignedPrev=weightsSize-weightsSize%8;

 const float* weightsData=weights.data();
 const float* inputData=input.data();

@@ -32,7 +32,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>
 #endif
 }

-for(size_t k=alignedPrev;k<inputSize;k++) {
+for(size_t k=alignedPrev;k<weightsSize;k++) {
 #ifdef USE_FMA
 partialSolution.avx=_mm256_fmadd_ps(_mm256_set_ps(weightsData[k],0,0,0,0,0,0,0),_mm256_set_ps(inputData[k],0,0,0,0,0,0,0),partialSolution.avx);
 #else

@@ -49,7 +49,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>

 #elif USE_SSE

-std::size_t alignedPrev=inputSize-inputSize%4;
+std::size_t alignedPrev=weightSize-weightSize%4;

 const float* weightsData=weights.data();
 const float* inputData=input.data();

@@ -61,7 +61,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>
 partialSolution.sse=_mm_add_ps(partialSolution.sse,_mm_mul_ps(_mm_load_ps(weightsData+k),_mm_load_ps(inputData+k)));
 }

-for(register size_t k=alignedPrev;k<inputSize;k++) {
+for(register size_t k=alignedPrev;k<weightSize;k++) {
 partialSolution.sse=_mm_add_ps(partialSolution.sse,_mm_mul_ps(_mm_load_ss(weightsData+k),_mm_load_ss(inputData+k)));
 }

@@ -76,7 +76,7 @@ float NeuralNetwork::BasisFunction::Linear::operator()(const std::vector<float>
 #else

 register float tmp = 0;
-for(size_t k=0;k<inputSize;k++) {
+for(size_t k=0;k<weightSize;k++) {
 tmp+=input[k]*weights[k];
 }
 return tmp;

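For reference, the scalar fallback after this change is a plain dot product over the length of the weight vector, under the relaxed assertion that the input is at least as long as the weights. A standalone sketch (the free-function name is made up for illustration):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    float linearBasis(const std::vector<float> &weights, const std::vector<float> &input) {
        assert(input.size() >= weights.size());
        float sum = 0.0f;
        for (std::size_t k = 0; k < weights.size(); ++k) {
            sum += input[k] * weights[k];   // dot product over the weighted prefix of the input
        }
        return sum;
    }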
@@ -39,11 +39,16 @@ std::unique_ptr<NeuralNetwork::FeedForward::Network> NeuralNetwork::FeedForward:
 for(auto layers:network->layers) {
 delete layers;
 }

 network->layers.clear();

 for(auto& layerObject: obj["layers"].as<SimpleJSON::Type::Array>()) {
 network->layers.push_back(NeuralNetwork::FeedForward::Layer::Factory::deserialize(layerObject.as<SimpleJSON::Type::Object>()).release());
 }

+network->_inputs=network->layers[0]->size()-1;
+network->_outputs=network->layers.back()->size()-1;
+
 return std::unique_ptr<Network>(network);
 }

@@ -5,7 +5,7 @@ SIMPLEJSON_REGISTER_FINISH(NeuralNetwork::Recurrent::Network::Factory, NeuralNet

 std::vector<float> NeuralNetwork::Recurrent::Network::computeOutput(const std::vector<float>& input, unsigned int iterations) {

-assert(input.size() == inputSize);
+assert(input.size() == _inputs);

 if(outputs.size() != neurons.size()) {
 outputs.resize(neurons.size());

@@ -16,7 +16,7 @@ std::vector<float> NeuralNetwork::Recurrent::Network::computeOutput(const std::v

 std::vector<float> newOutputs(neurons.size());

-for(size_t i=0;i<inputSize;i++) {
+for(size_t i=0;i<_inputs;i++) {
 outputs[i+1]=input[i];
 newOutputs[i+1]=input[i];
 }

@@ -26,15 +26,15 @@ std::vector<float> NeuralNetwork::Recurrent::Network::computeOutput(const std::v
 std::size_t neuronsSize = neurons.size();

 for(unsigned int iter=0;iter< iterations;iter++) {
-for(size_t i=inputSize+1;i<neuronsSize;i++) {
+for(size_t i=_inputs+1;i<neuronsSize;i++) {
 newOutputs[i] = neurons[i]->operator()(outputs);
 }
 outputs.swap(newOutputs);
 }

 std::vector<float> ret;
-for(size_t i=0;i<outputSize;i++) {
-ret.push_back(neurons[i+inputSize+1]->output());
+for(size_t i=0;i<_outputs;i++) {
+ret.push_back(neurons[i+_inputs+1]->output());
 }

 return ret;

@@ -45,9 +45,7 @@ NeuralNetwork::Recurrent::Network NeuralNetwork::Recurrent::Network::connectWith
 }

 NeuralNetwork::Recurrent::Network& NeuralNetwork::Recurrent::Network::operator=(const NeuralNetwork::Recurrent::Network&r) {
-inputSize=r.inputSize;
-outputSize=r.outputSize;
 outputs=r.outputs;
+NeuralNetwork::Network::operator=(r);

 for(std::size_t i=1;i<neurons.size();i++) {
 delete neurons[i];

@@ -68,8 +66,8 @@ SimpleJSON::Type::Object NeuralNetwork::Recurrent::Network::serialize() const {
 }
 return {
 {"class", "NeuralNetwork::Recurrent::Network"},
-{"inputSize", inputSize},
-{"outputSize", outputSize},
+{"inputSize", _inputs},
+{"outputSize", _outputs},
 {"outputs", outputs},
 {"neurons", neuronsSerialized}
 };