Merge branch 'docu'

Conflicts:
	src/NeuronNetwork/FeedForward.h
	src/NeuronNetwork/Network.h
	src/NeuronNetwork/Neuron.h
2015-01-28 18:56:47 +01:00
68 changed files with 2483 additions and 513 deletions

.gitignore

@@ -2,4 +2,7 @@ NN.kdev4
.kdev4
*.o
*.a
*.so
*.nm
/doc/html/*
!/doc/html/doxy-boot.js

Doxyfile
(file diff suppressed because it is too large)


@@ -7,23 +7,35 @@ all:|pre libs
pre:
@mkdir -p lib
libs: genetics nn
libs: ml genetics nn
test: all
make -C tests
nn: | nn_build lib/NeuronNetwork.a lib/NeuronNetwork.so
lib/NeuronNetwork.so: ./src/NeuronNetwork/NeuronNetwork.so
cp ./src/NeuronNetwork/NeuronNetwork.so ./lib/
ml: | ml_build lib/MachineLearning.a lib/MachineLearning.so
lib/NeuronNetwork.a: ./src/NeuronNetwork/NeuronNetwork.a
cp ./src/NeuronNetwork/NeuronNetwork.a ./lib/
cp ./src/NeuronNetwork/NeuronNetwork.nm ./lib/
lib/MachineLearning.so: ./src/MachineLearning/MachineLearning.so
cp ./src/MachineLearning/MachineLearning.so ./lib/
lib/MachineLearning.a: ./src/MachineLearning/MachineLearning.a
cp ./src/MachineLearning/MachineLearning.a ./lib/
cp ./src/MachineLearning/MachineLearning.nm ./lib/
ml_build:
@make -C src/MachineLearning
nn: | nn_build lib/NeuralNetwork.a lib/NeuralNetwork.so
lib/NeuralNetwork.so: ./src/NeuralNetwork/NeuralNetwork.so
cp ./src/NeuralNetwork/NeuralNetwork.so ./lib/
lib/NeuralNetwork.a: ./src/NeuralNetwork/NeuralNetwork.a
cp ./src/NeuralNetwork/NeuralNetwork.a ./lib/
cp ./src/NeuralNetwork/NeuralNetwork.nm ./lib/
nn_build:
@make -C src/NeuronNetwork
@make -C src/NeuralNetwork
genetics: | genetics_build lib/Genetics.a lib/Genetics.so
@@ -37,10 +49,14 @@ lib/Genetics.a: ./src/Genetics/Genetics.a
genetics_build:
@make -C src/Genetics
documentation:
doxygen
clean:
@make -C src/MachineLearning clean
@make -C src/Genetics clean
@make -C src/NeuronNetwork clean
@make -C src/NeuralNetwork clean
@make -C tests clean
#@rm -f ./*.so ./*.a ./*.nm
@rm -f ./lib/*.so ./lib/*.a ./lib/*.nm
@echo "Cleaned....."
@echo "Cleaned....."


@@ -5,7 +5,7 @@ CXXFLAGS+= -std=c++14
CXXFLAGS+= -pg -fPIC
CXXFLAGS+= -g
CXXFLAGS+= -fPIC -pthread
#CXXFLAGS+= -DUSE_SSE2
OPTIMALIZATION = -O3 -march=native -mtune=native
%.o : %.cpp %.h

doc/customdoxygen.css

@@ -0,0 +1,255 @@
h1, .h1, h2, .h2, h3, .h3{
font-weight: 200 !important;
}
#navrow1, #navrow2, #navrow3, #navrow4, #navrow5{
border-bottom: 1px solid #EEEEEE;
}
.adjust-right {
margin-left: 30px !important;
font-size: 1.15em !important;
}
.navbar{
border: 0px solid #222 !important;
}
/* Sticky footer styles
-------------------------------------------------- */
html,
body {
height: 100%;
/* The html and body elements cannot have any padding or margin. */
}
/* Wrapper for page content to push down footer */
#wrap {
min-height: 100%;
height: auto;
/* Negative indent footer by its height */
margin: 0 auto -60px;
/* Pad bottom by footer height */
padding: 0 0 60px;
}
/* Set the fixed height of the footer here */
#footer {
font-size: 0.9em;
padding: 8px 0px;
background-color: #f5f5f5;
}
.footer-row {
line-height: 44px;
}
#footer > .container {
padding-left: 15px;
padding-right: 15px;
}
.footer-follow-icon {
margin-left: 3px;
text-decoration: none !important;
}
.footer-follow-icon img {
width: 20px;
}
.footer-link {
padding-top: 5px;
display: inline-block;
color: #999999;
text-decoration: none;
}
.footer-copyright {
text-align: center;
}
@media (min-width: 992px) {
.footer-row {
text-align: left;
}
.footer-icons {
text-align: right;
}
}
@media (max-width: 991px) {
.footer-row {
text-align: center;
}
.footer-icons {
text-align: center;
}
}
/* DOXYGEN Code Styles
----------------------------------- */
a.qindex {
font-weight: bold;
}
a.qindexHL {
font-weight: bold;
background-color: #9CAFD4;
color: #ffffff;
border: 1px double #869DCA;
}
.contents a.qindexHL:visited {
color: #ffffff;
}
a.code, a.code:visited, a.line, a.line:visited {
color: #4665A2;
}
a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited {
color: #4665A2;
}
/* @end */
dl.el {
margin-left: -1cm;
}
pre.fragment {
border: 1px solid #C4CFE5;
background-color: #FBFCFD;
padding: 4px 6px;
margin: 4px 8px 4px 2px;
overflow: auto;
word-wrap: break-word;
font-size: 9pt;
line-height: 125%;
font-family: monospace, fixed;
font-size: 105%;
}
div.fragment {
padding: 4px 6px;
margin: 4px 8px 4px 2px;
border: 1px solid #C4CFE5;
}
div.line {
font-family: monospace, fixed;
font-size: 13px;
min-height: 13px;
line-height: 1.0;
text-wrap: unrestricted;
white-space: -moz-pre-wrap; /* Moz */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
white-space: pre-wrap; /* CSS3 */
word-wrap: break-word; /* IE 5.5+ */
text-indent: -53px;
padding-left: 53px;
padding-bottom: 0px;
margin: 0px;
-webkit-transition-property: background-color, box-shadow;
-webkit-transition-duration: 0.5s;
-moz-transition-property: background-color, box-shadow;
-moz-transition-duration: 0.5s;
-ms-transition-property: background-color, box-shadow;
-ms-transition-duration: 0.5s;
-o-transition-property: background-color, box-shadow;
-o-transition-duration: 0.5s;
transition-property: background-color, box-shadow;
transition-duration: 0.5s;
}
div.line.glow {
background-color: cyan;
box-shadow: 0 0 10px cyan;
}
span.lineno {
padding-right: 4px;
text-align: right;
border-right: 2px solid #0F0;
background-color: #E8E8E8;
white-space: pre;
}
span.lineno a {
background-color: #D8D8D8;
}
span.lineno a:hover {
background-color: #C8C8C8;
}
div.groupHeader {
margin-left: 16px;
margin-top: 12px;
font-weight: bold;
}
div.groupText {
margin-left: 16px;
font-style: italic;
}
/* @group Code Colorization */
span.keyword {
color: #008000
}
span.keywordtype {
color: #604020
}
span.keywordflow {
color: #e08000
}
span.comment {
color: #800000
}
span.preprocessor {
color: #806020
}
span.stringliteral {
color: #002080
}
span.charliteral {
color: #008080
}
span.vhdldigit {
color: #ff00ff
}
span.vhdlchar {
color: #000000
}
span.vhdlkeyword {
color: #700070
}
span.vhdllogic {
color: #ff0000
}
blockquote {
background-color: #F7F8FB;
border-left: 2px solid #9CAFD4;
margin: 0 24px 0 4px;
padding: 0 12px 0 16px;
}

doc/footer.html

@@ -0,0 +1,26 @@
<!-- HTML footer for doxygen 1.8.8-->
<!-- start footer part -->
<!--BEGIN GENERATE_TREEVIEW-->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
$navpath
<li class="footer">$generatedby
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="$relpath^doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
</div>
</div>
</div>
</div>
</div>
<!--BEGIN !GENERATE_TREEVIEW-->
<hr class="footer"/><address class="footer"><small>
$generatedby &#160;<a href="http://www.doxygen.org/index.html">
<img class="footer" src="$relpath^doxygen.png" alt="doxygen"/>
</a> $doxygenversion
</small></address>
<!--END !GENERATE_TREEVIEW-->
</body>
</html>

doc/header.html

@@ -0,0 +1,42 @@
<!-- HTML header for doxygen 1.8.8-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<!-- For Mobile Devices -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<!--<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>-->
<script type="text/javascript" src="$relpath^dynsections.js"></script>
$treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/js/bootstrap.min.js"></script>
<script type="text/javascript" src="doxy-boot.js"></script>
</head>
<body>
<nav class="navbar navbar-default" role="navigation">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand">$projectname $projectnumber</a>
</div>
</div>
</nav>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div class="content" id="content">
<div class="container">
<div class="row">
<div class="col-sm-12 panel panel-default" style="padding-bottom: 15px;">
<div style="margin-bottom: 15px;">
<!-- end header part -->

doc/html/doxy-boot.js

@@ -0,0 +1,120 @@
$( document ).ready(function() {
$("div.headertitle").addClass("page-header");
$("div.title").addClass("h1");
$('li > a[href="index.html"] > span').before("<i class='fa fa-cog'></i> ");
$('li > a[href="modules.html"] > span').before("<i class='fa fa-square'></i> ");
$('li > a[href="namespaces.html"] > span').before("<i class='fa fa-bars'></i> ");
$('li > a[href="annotated.html"] > span').before("<i class='fa fa-list-ul'></i> ");
$('li > a[href="classes.html"] > span').before("<i class='fa fa-book'></i> ");
$('li > a[href="inherits.html"] > span').before("<i class='fa fa-sitemap'></i> ");
$('li > a[href="functions.html"] > span').before("<i class='fa fa-list'></i> ");
$('li > a[href="functions_func.html"] > span').before("<i class='fa fa-list'></i> ");
$('li > a[href="functions_vars.html"] > span').before("<i class='fa fa-list'></i> ");
$('li > a[href="functions_enum.html"] > span').before("<i class='fa fa-list'></i> ");
$('li > a[href="functions_eval.html"] > span').before("<i class='fa fa-list'></i> ");
$('img[src="ftv2ns.png"]').replaceWith('<span class="label label-danger">N</span> ');
$('img[src="ftv2cl.png"]').replaceWith('<span class="label label-danger">C</span> ');
$("ul.tablist").addClass("nav nav-pills nav-justified");
$("ul.tablist").css("margin-top", "0.5em");
$("ul.tablist").css("margin-bottom", "0.5em");
$("li.current").addClass("active");
$("iframe").attr("scrolling", "yes");
$("#nav-path > ul").addClass("breadcrumb");
$("table.params").addClass("table");
$("div.ingroups").wrapInner("<small></small>");
$("div.levels").css("margin", "0.5em");
$("div.levels > span").addClass("btn btn-default btn-xs");
$("div.levels > span").css("margin-right", "0.25em");
$("table.directory").addClass("table table-striped");
$("div.summary > a").addClass("btn btn-default btn-xs");
$("table.fieldtable").addClass("table");
$(".fragment").addClass("well");
$(".memitem").addClass("panel panel-default");
$(".memproto").addClass("panel-heading");
$(".memdoc").addClass("panel-body");
$("span.mlabel").addClass("label label-info");
$("table.memberdecls").addClass("table");
$("[class^=memitem]").addClass("active");
$("div.ah").addClass("btn btn-default");
$("span.mlabels").addClass("pull-right");
$("table.mlabels").css("width", "100%")
$("td.mlabels-right").addClass("pull-right");
$("div.ttc").addClass("panel panel-primary");
$("div.ttname").addClass("panel-heading");
$("div.ttname a").css("color", 'white');
$("div.ttdef,div.ttdoc,div.ttdeci").addClass("panel-body");
$('#MSearchBox').parent().remove();
$('div.fragment.well div.line:first').css('margin-top', '15px');
$('div.fragment.well div.line:last').css('margin-bottom', '15px');
$('table.doxtable').removeClass('doxtable').addClass('table table-striped table-bordered').each(function(){
$(this).prepend('<thead></thead>');
$(this).find('tbody > tr:first').prependTo($(this).find('thead'));
$(this).find('td > span.success').parent().addClass('success');
$(this).find('td > span.warning').parent().addClass('warning');
$(this).find('td > span.danger').parent().addClass('danger');
});
if($('div.fragment.well div.ttc').length > 0)
{
$('div.fragment.well div.line:first').parent().removeClass('fragment well');
}
$('table.memberdecls').find('.memItemRight').each(function(){
$(this).contents().appendTo($(this).siblings('.memItemLeft'));
$(this).siblings('.memItemLeft').attr('align', 'left');
});
function getOriginalWidthOfImg(img_element) {
var t = new Image();
t.src = (img_element.getAttribute ? img_element.getAttribute("src") : false) || img_element.src;
return t.width;
}
$('div.dyncontent').find('img').each(function(){
if(getOriginalWidthOfImg($(this)[0]) > $('#content>div.container').width())
$(this).css('width', '100%');
});
$(".memitem").removeClass('memitem');
$(".memproto").removeClass('memproto');
$(".memdoc").removeClass('memdoc');
$("span.mlabel").removeClass('mlabel');
$("table.memberdecls").removeClass('memberdecls');
$("[class^=memitem]").removeClass('memitem');
$("span.mlabels").removeClass('mlabels');
$("table.mlabels").removeClass('mlabels');
$("td.mlabels-right").removeClass('mlabels-right');
$(".navpath").removeClass('navpath');
$("li.navelem").removeClass('navelem');
$("a.el").removeClass('el');
$("div.ah").removeClass('ah');
$("div.header").removeClass("header");
$('.mdescLeft').each(function(){
if($(this).html()=="&nbsp;") {
$(this).siblings('.mdescRight').attr('colspan', 2);
$(this).remove();
}
});
$('td.memItemLeft').each(function(){
if($(this).siblings('.memItemRight').html()=="") {
$(this).attr('colspan', 2);
$(this).siblings('.memItemRight').remove();
}
});
});

mainpage.dox

@@ -0,0 +1,9 @@
/**
@mainpage Artificial Neural Network Library project documentation
@brief C++ library for Artificial Neural Networks
@author Tomas Cernik (Tom.Cernik@gmail.com)
TODO
*/
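
A minimal usage sketch to anchor the mainpage (hedged: it assumes the post-merge include layout and the FeedForward/BackPropagation/teachSet API shown in the diffs below; XOR is the same toy task the tests use):

// Hedged sketch, not part of the commit: train the renamed FeedForward on XOR.
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <utility>
#include <vector>

int main()
{
    Shin::NeuralNetwork::FeedForward net({2,4,1});           // 2 inputs, 4 hidden, 1 output
    Shin::NeuralNetwork::Learning::BackPropagation bp(net);
    bp.setLearningCoeficient(0.4);                           // the library's spelling
    std::vector<std::pair<Shin::Problem,Shin::Solution>> set;
    set.push_back({Shin::Problem({0,0}), Shin::Solution({0})});
    set.push_back({Shin::Problem({1,0}), Shin::Solution({1})});
    set.push_back({Shin::Problem({0,1}), Shin::Solution({1})});
    set.push_back({Shin::Problem({1,1}), Shin::Solution({0})});
    for(int epoch=0; epoch<10000; ++epoch)
        bp.teachSet(set);                                    // accumulated squared error
    std::cout << net.solve(Shin::Problem({1,0}))[0] << "\n"; // should approach 1
}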


@@ -1,6 +1,6 @@
#include "./IO"
Shin::NeuronNetwork::IO Shin::NeuronNetwork::IO::operator+(const IO &r)
Shin::IO Shin::IO::operator+(const IO &r)
{
Shin::NeuronNetwork::IO tmp;
for(float a:this->data)


@@ -6,13 +6,11 @@
namespace Shin
{
namespace NeuronNetwork
{
class IO
{
public:
IO() {};
IO(std::vector<float> &d) : data(d) {}
IO(const std::vector<float> &d) : data(d) {}
IO(const IO &old) : data(old.data) {}
IO(const std::initializer_list<float> &a):data(a) { }
virtual ~IO() {};
@@ -27,5 +25,4 @@ class IO
private:
};
}
}
#endif


@@ -0,0 +1,35 @@
#ifndef _S_ML_LEARNING_H_
#define _S_ML_LEARNING_H_
#include <cstddef>
namespace Shin
{
namespace MachineLearning
{
const float LearningCoeficient=0.4;
const float DefaultNoiseSize=500;
class Learning
{
public:
inline Learning() {};
inline virtual ~Learning() {};
inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; };
inline virtual void allowThreading() final {allowThreads=1;}
inline virtual void disableThreading() final {allowThreads=0;}
inline virtual void allowNoise() final {noise=1;}
inline virtual void disableNoise() final {noise=0;}
inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; }
protected:
float learningCoeficient=Shin::MachineLearning::LearningCoeficient;
bool allowThreads=0;
bool noise=0;
unsigned noiseSize=Shin::MachineLearning::DefaultNoiseSize;
};
}
}
#endif
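
The base class is all knobs and no algorithm; a hedged sketch of how a concrete learner is expected to consume it (DemoLearner is a hypothetical name, not part of the library):

#include "Learning.h"

// Hypothetical subclass, for illustration only: inherits the shared knobs.
class DemoLearner : public Shin::MachineLearning::Learning {};

int main()
{
    DemoLearner l;
    l.setLearningCoeficient(0.1f); // replaces the 0.4 default
    l.allowThreading();            // allowThreads = 1
    l.allowNoise();                // noise = 1
    l.setNoiseSize(250);           // "milipercents", per the header's naming
}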


@@ -1,12 +1,9 @@
OBJFILES=\
FeedForward.o\
Learning/Supervised.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o\
Learning/Unsupervised.o Learning/Reinforcement.o Learning/RL/QFunction.o Learning/QLearning.o\
./IO.o
QLearning.o
LINKFILES= ../sse_mathfun.o
LINKFILES=
LIBNAME=NeuronNetwork
LIBNAME=MachineLearning
include ../../Makefile.const
@@ -17,7 +14,7 @@ lib: $(LIBNAME).so $(LIBNAME).a
$(LIBNAME).so: $(OBJFILES)
$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so
$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ./Solution.h ./Problem.h
$(LIBNAME).a: $(OBJFILES) ./Learning.h
rm -f $(LIBNAME).a # create new library
ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
ranlib $(LIBNAME).a


@@ -45,18 +45,6 @@ void Shin::NeuronNetwork::RL::QFunctionTable::learn(Shin::NeuronNetwork::Solutio
solution->second.second++;
}
}
void Shin::NeuronNetwork::RL::QFunctionTable::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Solution, Shin::NeuronNetwork::Problem > >& p, float quality)
{
for(int i=p.size()-1;i>=0;i--)
{
auto &pair=p[i];
learn(pair.first,pair.second,quality);
quality*=0.3;
}
}
Shin::NeuronNetwork::RL::QFunctionNetwork::QFunctionNetwork() : b(nullptr),function(nullptr)
{
@@ -80,46 +68,6 @@ void Shin::NeuronNetwork::RL::QFunctionNetwork::initialiseNetwork(size_t input,
}
}
void Shin::NeuronNetwork::RL::QFunctionNetwork::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Solution, Shin::NeuronNetwork::Problem > >& p, float quality)
{
if(quality>0)
{
b->setLearningCoeficient(learningA);
// b->setLearningCoeficient(0.05);
}else
{
b->setLearningCoeficient(learningB);
// b->setLearningCoeficient(0.008);
}
for(int i=p.size()-1;i>=0;i--)
{
learn(p[i].first,p[i].second,quality);
quality*=0.95;
}
}
void Shin::NeuronNetwork::RL::QFunctionNetwork::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Problem,int> >& p, float quality) // TODO there must be better way
{
std::vector<std::pair<Solution,Problem>> q;
register int solSize=0;
if(p.size()>0)
solSize=function->solve(p[0].first).size();
if (!solSize)
return;
for(size_t i=0;i<p.size();i++)
{
Solution s;
for(int j=0;j<solSize;j++)
{
s.push_back(j==p[i].second?1:0);
}
q.push_back(std::pair<Solution,Problem>(s,p[i].first));
}
learnDelayed(q,quality);
}
void Shin::NeuronNetwork::RL::QFunctionNetwork::learn(Shin::NeuronNetwork::Solution& s, Shin::NeuronNetwork::Problem& p, float quality)
{
register int loops=abs(quality)/10;


@@ -3,28 +3,29 @@
#include <map>
#include "../../Solution.h"
#include "../../FeedForward.h"
#include "../BackPropagation.h"
#include "../OpticalBackPropagation.h"
#include "Unsupervised.h"
#include "../Solution.h"
//#include "../FeedForward.h"
//#include "BackPropagation.h"
//#include "OpticalBackPropagation.h"
namespace Shin
{
namespace NeuronNetwork
{
namespace RL
namespace MachineLearning
{
class QFunction
{
public:
QFunction();
virtual ~QFunction();
virtual void learnDelayed(std::vector<std::pair<Solution,Problem>> &p, float quality)=0;
virtual void learn(Solution &s, Problem &p, float quality)=0;
//virtual void learnDelayed(std::vector<std::pair<Solution,Problem>> &p, float quality)=0;
//virtual void learn(Solution &s, Problem &p, float quality)=0;
protected:
float learningCoeficient;
};
/*
class QFunctionTable : public QFunction
{
public:
@@ -83,15 +84,11 @@ namespace RL
virtual int getChoice(Problem &p);
virtual Solution getSolution(Problem &p) {return function->solve(p);}
void setLearningCoeficient(double ok, double err) {learningA=ok;learningB=err;};
void opticalBackPropagation() {delete b; b=new Learning::OpticalBackPropagation(*function);};
private:
Learning::BackPropagation *b;
FeedForward * function;
float learningA=0.05;
float learningB=0.008;
};
}
*/
}
}


@@ -0,0 +1,32 @@
#include "./QLearning"
void Shin::MachineLearning::QLearning::learnDelayed(std::vector< std::pair< Shin::Problem, int > >& p, float quality)
{
std::vector<std::pair<Problem,Solution>> q;
register int solSize=0;
if(p.size()>0)
solSize=getSolution(p[0].first).size();
if (!solSize)
return;
for(size_t i=0;i<p.size();i++)
{
Solution s;
for(int j=0;j<solSize;j++)
{
s.push_back(j==p[i].second?1:0);
}
q.push_back(std::pair<Problem,Solution>(p[i].first,s));
}
learnDelayed(q,quality);
}
void Shin::MachineLearning::QLearning::learnDelayed(std::vector< std::pair<Shin::Problem, Shin::Solution> >& p, float quality)
{
for(int i=p.size()-1;i>=0;i--)
{
auto &pair=p[i];
learn(pair.first,pair.second,quality);
quality*=0.3;
}
}
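
The int overload above one-hot-encodes the chosen action into a Solution; the pair overload then replays the episode from its last step backwards, shrinking the reward by a factor of 0.3 per step. A hedged standalone trace of that decay:

// Standalone trace of learnDelayed's backward discounting (no library calls).
#include <cstdio>

int main()
{
    float quality = 1.0f;                 // reward handed to learnDelayed
    for(int step = 2; step >= 0; --step)  // 3-step episode, last step first
    {
        std::printf("step %d reinforced with %.2f\n", step, quality);
        quality *= 0.3f;                  // yields 1.00, 0.30, 0.09
    }
    return 0;
}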


@@ -0,0 +1,106 @@
#ifndef _QLEARNING_H_
#define _QLEARNING_H_
#include <cstddef>
#include <map>
#include "Unsupervised.h"
#include "../NeuralNetwork/FeedForward.h"
/*
* #include "BackPropagation.h"
* #include "OpticalBackPropagation.h"
* #include "../FeedForward.h"
* #include "Unsupervised.h"
* #include "QFunction.h"
*/
/*
* http://www2.econ.iastate.edu/tesfatsi/RLUsersGuide.ICAC2005.pdf
* http://www.autonlab.org/tutorials/rl06.pdf
* http://www.nbu.bg/cogs/events/2000/Readings/Petrov/rltutorial.pdf
*
* http://www.applied-mathematics.net/qlearning/qlearning.html
* http://nn.cs.utexas.edu/downloads/papers/stanley.gecco02_1.pdf
*
* http://stackoverflow.com/questions/740389/good-implementations-of-reinforced-learning
*
* http://stackoverflow.com/questions/10722064/training-a-neural-network-with-reinforcement-learning
*
* http://remi.coulom.free.fr/Thesis/
* http://remi.coulom.free.fr/Publications/Thesis.pdf
*
* http://link.springer.com/article/10.1007/BF00992696
*
* http://scholar.google.cz/scholar?start=10&q=reinforcement+learning+feedforward&hl=en&as_sdt=0,5&as_vis=1
*
*/
namespace Shin
{
namespace MachineLearning
{
class QLearning: Learning
{
public:
inline QLearning() {};
virtual ~QLearning() {} ;
QLearning(const QLearning&) =delete;
QLearning& operator=(const QLearning&) =delete;
virtual void learnDelayed(std::vector<std::pair<Problem,Solution>> &p, float quality) final;
virtual void learnDelayed(std::vector<std::pair<Problem,int>> &p, float quality) final;
virtual void learn(Problem &p,Solution &s, const float& quality)=0;
virtual void learn(Problem &p,int action, const float& quality)=0;
inline virtual void setLearningCoeficient(const float& a) override {setLearningCoeficient(a,a);};
inline void setLearningCoeficient(const float& ok, const float& err) {learningA=ok;learningB=err;};
virtual Solution getSolution(Problem &p)=0;
virtual int getChoice(Problem &p)=0;
protected:
float learningA=0.05;
float learningB=0.008;
};
class QLearningNetwork : public QLearning
{
public:
QLearningNetwork(size_t input, size_t size, size_t actions): QLearning(),function({input,size,actions}),actionsSize(actions) {}
QLearningNetwork(std::initializer_list<size_t> s): QLearning(),function(s),actionsSize(*s.end()) {}
QLearningNetwork(const QLearningNetwork&)=delete;
QLearningNetwork operator=(const QLearningNetwork&)=delete;
virtual void learn(Problem &p,Solution &s, const float& quality) override;
virtual void learn(Problem &p,int action, const float& quality) override;
virtual Solution getSolution(Problem &p) override {return function.solve(p);}
virtual int getChoice(Problem &p) override;
protected:
Shin::NeuralNetwork::FeedForward function;
size_t actionsSize;
};
class QLearningTable : public QLearning
{
public:
QLearningTable():QLearning(),data() {};
QLearningTable(const QLearningTable&)=delete;
QLearningTable operator=(const QLearningTable&)=delete;
virtual void learn(Problem &p,Solution &s, const float& quality) override;
virtual void learn(Problem &p,int action, const float& quality) override;
virtual Solution getSolution(Problem &p) override;
virtual int getChoice(Problem &p) override;
protected:
std::map<Problem,std::map<int,std::pair<float,int>>> data;
};
}
}
#endif
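
A hedged agent-loop sketch against the QLearningNetwork interface declared above; observe() and episodeReward() are hypothetical environment stand-ins, not library functions:

#include "QLearning.h"
#include <utility>
#include <vector>

// Hypothetical environment hooks, for illustration only.
Shin::Problem observe() { return Shin::Problem({0,0,0,0}); }
float episodeReward()   { return 1.0f; }

int main()
{
    Shin::MachineLearning::QLearningNetwork agent(4, 16, 2); // 4 inputs, 16 hidden, 2 actions
    std::vector<std::pair<Shin::Problem,int>> episode;
    for(int t = 0; t < 100; ++t)
    {
        Shin::Problem p = observe();
        episode.push_back({p, agent.getChoice(p)});
    }
    agent.learnDelayed(episode, episodeReward()); // one-hot + 0.3-discounted replay
}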


@@ -0,0 +1,18 @@
#ifndef _UNSUPERVISEDLEARNING_H_
#define _UNSUPERVISEDLEARNING_H_
#include "./Learning.h"
namespace Shin
{
namespace MachineLearning
{
class Unsupervised : public Learning
{
public:
Unsupervised(): Learning() {};
virtual ~Unsupervised() {};
};
}
}
#endif


@@ -1,6 +1,6 @@
#include "FeedForward"
using namespace Shin::NeuronNetwork;
using namespace Shin::NeuralNetwork;
FFLayer::~FFLayer()
{
@@ -21,7 +21,7 @@ FFNeuron& FFLayer::operator[](const size_t& neuron)
neurons=new FFNeuron*[layerSize];
for(size_t i=0;i<layerSize;i++)
{
neurons[i]=new FFNeuron(potentials[i],weights[i],sums[i],inputs[i],lambda);
neurons[i]=new FFNeuron(potentials[i],weights[i],outputs[i],inputs[i],lambda,function);
}
}
@@ -33,17 +33,19 @@ FFNeuron& FFLayer::operator[](const size_t& neuron)
}
FeedForward::FeedForward(std::initializer_list< int > s, double lam): ACyclicNetwork(lam),layers(s.size())
FeedForward::FeedForward(std::initializer_list<size_t> s, double lam, FeedForwardInitializer weightInit): ACyclicNetwork(lam),layers(s.size())
{
transfer = new TransferFunction::TransferFunction*[s.size()];
weights= new float**[s.size()];
potentials= new float*[s.size()];
layerSizes= new size_t[s.size()];
sums= new float*[s.size()];
outputs= new float*[s.size()];
inputs= new float*[s.size()];
int i=0;
int prev_size=1;
register int i=0;
register int prev_size=1;
for(int layeSize:s) // TODO rename
{
transfer[i]= new TransferFunction::Sigmoid(lam);
layeSize+=1;
if(i==0)
{
@@ -52,18 +54,18 @@ FeedForward::FeedForward(std::initializer_list< int > s, double lam): ACyclicNet
layerSizes[i]=layeSize;
weights[i]= new float*[layeSize];
potentials[i]= new float[layeSize];
sums[i]= new float[layeSize];
outputs[i]= new float[layeSize];
inputs[i]= new float[layeSize];
potentials[i][0]=1.0;
sums[i][0]=1.0;
outputs[i][0]=1.0;
for (int j=1;j<layeSize;j++)
{
potentials[i][j]=1.0;
weights[i][j]= new float[prev_size];
for(int k=0;k<prev_size;k++)
{
weights[i][j][k]=1.0-((float)(rand()%2001))/1000.0;
weights[i][j][k]=weightInit(i,j,k);
}
}
i++;
@@ -82,13 +84,13 @@ FeedForward::~FeedForward()
}
delete[] weights[i];
delete[] potentials[i];
delete[] sums[i];
delete[] outputs[i];
delete[] inputs[i];
}
delete[] weights;
delete[] potentials;
delete[] layerSizes;
delete[] sums;
delete[] outputs;
delete[] inputs;
}
if(ffLayers !=nullptr)
@@ -103,13 +105,12 @@ FeedForward::~FeedForward()
void FeedForward::solvePart(float *newSolution, register size_t begin, size_t end,size_t prevSize, float *sol,size_t layer)
{
if(prevSize >4)
TransferFunction::StreamingTransferFunction *function=dynamic_cast<TransferFunction::StreamingTransferFunction*>(transfer[layer]);
if(prevSize >=4 && function !=nullptr)
{
__m128 partialSolution;
__m128 w;
__m128 sols;
__m128 temporaryConst1=_mm_set1_ps(1.0);
__m128 temporaryConstLambda=_mm_set1_ps(-lambda);
register size_t alignedPrev=prevSize>16?(prevSize-(prevSize%16)):0;
for( size_t j=begin;j<end;j++)
{
@@ -122,45 +123,43 @@ void FeedForward::solvePart(float *newSolution, register size_t begin, size_t en
w=_mm_mul_ps(w,sols);
partialSolution=_mm_add_ps(partialSolution,w);
}
for(register size_t k=0;k<alignedPrev;k+=4)
register float* memory=this->weights[layer][j];
for(register size_t k=0;k<alignedPrev;k+=sizeof(float))
{
w = _mm_load_ps(this->weights[layer][j]+k);
w = _mm_load_ps(memory+k);
sols = _mm_load_ps(sol+k);
w=_mm_mul_ps(w,sols);
partialSolution=_mm_add_ps(partialSolution,w);
}
/* pre-SSE3 solution
__m128 temp = _mm_add_ps(_mm_movehl_ps(foo128, foo128), foo128);
float x;
_mm_store_ss(&x, _mm_add_ss(temp, _mm_shuffle_ps(temp, 1)));
*/
#ifdef USE_SSE2 //pre-SSE3 solution
partialSolution= _mm_add_ps(_mm_movehl_ps(partialSolution, partialSolution), partialSolution);
partialSolution=_mm_add_ss(partialSolution, _mm_shuffle_ps(partialSolution,partialSolution, 1));
#else
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
partialSolution = _mm_hadd_ps(partialSolution, partialSolution);
#endif
_mm_store_ss(inputs[layer]+j,partialSolution);
partialSolution=_mm_mul_ps(temporaryConstLambda,partialSolution); //-lambda*sol[k]
partialSolution=exp_ps(partialSolution); //exp(sols)
partialSolution= _mm_add_ps(partialSolution,temporaryConst1); //1+exp()
partialSolution= _mm_div_ps(temporaryConst1,partialSolution);//1/....*/
partialSolution=function->operator()(partialSolution);
_mm_store_ss(newSolution+j,partialSolution);
}
}else
{
for( size_t j=begin;j<end;j++)
{
register float tmp=0;
float tmp=0;
for(register size_t k=0;k<prevSize;k++)
{
tmp+=sol[k]*weights[layer][j][k];
}
newSolution[j]=(1.0/(1.0+exp(-lambda*tmp)));
inputs[layer][j]=tmp;
newSolution[j]=transfer[layer]->operator()(tmp);
}
}
}
Solution FeedForward::solve(const Problem& p)
Shin::Solution FeedForward::solve(const Shin::Problem& p)
{
register float* sol=sums[0];
register float* sol=outputs[0];
sol[0]=1;
for(size_t i=0;i<p.size();i++)
@@ -169,23 +168,20 @@ Solution FeedForward::solve(const Problem& p)
register size_t prevSize=layerSizes[0];
for(register size_t i=1;i<layers;i++)
{
float* newSolution= sums[i];
float* newSolution= outputs[i];
if(threads > 1 && (layerSizes[i] > 700 || prevSize > 700)) // 700 is a guess at the layer size where spawning threads starts to pay off
{
std::vector<std::thread> th;
size_t s=1;
size_t step =layerSizes[i]/threads;
for(size_t t=1;t<=threads;t++)
register size_t step =layerSizes[i]/threads;
for(size_t t=1;t<threads;t++)
{
//TODO do i need it to check?
if(s>=layerSizes[i])
break;
th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{
solvePart(newSolution,from,to,prevSize,sol,i);
},s,t==threads?layerSizes[i]:s+step));//{}
},s,s+step));
s+=step;
}
solvePart(newSolution,s,layerSizes[i],prevSize,sol,i);
for (auto& thr : th)
thr.join();
}else
@@ -210,7 +206,7 @@ FFLayer& FeedForward::operator[](const size_t& l)
ffLayers=new FFLayer*[layers];
for(size_t i=0;i<layers;i++)
{
ffLayers[i]=new FFLayer(layerSizes[i],potentials[i],weights[i],sums[i],inputs[i],lambda);
ffLayers[i]=new FFLayer(layerSizes[i],potentials[i],weights[i],outputs[i],inputs[i],lambda,*transfer[i]);
}
}
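
For reference, everything the SSE path above computes per neuron reduces to this scalar form (a hedged sketch with the default Sigmoid written out; sol holds the previous layer's outputs, slot 0 being the bias):

#include <cmath>
#include <cstddef>

// Hedged scalar reference for solvePart: input = sum_k sol[k]*w[k],
// output = transfer(input); here the default Sigmoid 1/(1+exp(-lambda*x)).
float neuronOutput(const float* sol, const float* w, std::size_t prevSize, float lambda)
{
    float sum = 0.0f;
    for(std::size_t k = 0; k < prevSize; ++k)
        sum += sol[k] * w[k];                  // weighted sum, bias included at k==0
    return 1.0f / (1.0f + std::exp(-lambda * sum));
}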


@@ -1,14 +1,17 @@
#ifndef _S_NN_FF_H_
#define _S_NN_FF_H_
#include "Problem"
#include "Solution"
#include "Neuron"
#include "../Problem"
#include "../Solution"
#include "Network"
#include "TransferFunction/Sigmoid.h"
#include "TransferFunction/TransferFunction.h"
#include <vector>
#include <initializer_list>
#include <thread>
#include <pthread.h>
#include <iostream>
#include <math.h>
@@ -23,30 +26,31 @@
namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
class FFNeuron : public Neuron
{
public:
inline FFNeuron(float &pot, float *w, float &outputF, float &i,float lam,TransferFunction::TransferFunction &fun):function(fun),potential(pot),weights(w),out(outputF),inputs(i),lambda(lam) { }
FFNeuron() = delete;
FFNeuron(const FFNeuron&) = delete;
FFNeuron& operator=(const FFNeuron&) = delete;
FFNeuron(float &pot, float *w, float &s, float &i,float lam):potential(pot),weights(w),sum(s),inputs(i),lambda(lam) { }
inline virtual float getPotential() const override {return potential;}
inline virtual void setPotential(const float& p) override { potential=p;}
inline virtual float getWeight(const size_t& i ) const override { return weights[i];}
inline virtual void setWeight(const size_t& i,const float &p) override { weights[i]=p; }
inline virtual float output() const override { return sum; }
inline virtual float output() const override { return out; }
inline virtual float input() const override { return inputs; }
inline virtual float derivatedOutput() const override { return lambda*output()*(1.0-output()); }
inline virtual float derivatedOutput() const override { return function.derivatedOutput(inputs,out); }
protected:
TransferFunction::TransferFunction &function;
float &potential;
float *weights;
float &sum;
float &out;
float &inputs;
float lambda;
private:
@@ -55,7 +59,7 @@ namespace NeuronNetwork
class FFLayer: public Layer
{
public:
FFLayer(size_t s, float *p,float **w,float *su,float *in,float lam): layerSize(s),potentials(p),weights(w),sums(su),inputs(in),lambda(lam) {}
inline FFLayer(size_t s, float *p,float **w,float *out,float *in,float lam,TransferFunction::TransferFunction &fun): function(fun), layerSize(s),potentials(p),weights(w),outputs(out),inputs(in),lambda(lam) {}
~FFLayer();
FFLayer(const FFLayer &) = delete;
@@ -64,22 +68,48 @@ namespace NeuronNetwork
virtual FFNeuron& operator[](const size_t& layer) override;
inline virtual size_t size() const override {return layerSize;};
protected:
TransferFunction::TransferFunction &function;
FFNeuron **neurons=nullptr;
size_t layerSize;
float *potentials;
float **weights;
float *sums;
float *outputs;
float *inputs;
float lambda;
};
/**
* @brief typedef for the FeedForward weight-initializing function
*/
typedef std::function<float(const size_t&layer, const size_t &neuron, const size_t &weight)> FeedForwardInitializer;
/**
* @author Tomas Cernik (Tom.Cernik@gmail.com)
* @brief Class representing FeedForward network
* @see ACyclicNetwork
*/
class FeedForward:public ACyclicNetwork
{
public:
FeedForward(std::initializer_list<int> s, double lam=Shin::NeuronNetwork::lambda);
/**
* @brief Constructor for FeedForward
* @param s is the initializer list of layer sizes
* @param lam is the parameter for the TransferFunction
* @param weightInit is weight initializer function
*/
FeedForward(std::initializer_list<size_t> s, double lam=Shin::NeuralNetwork::lambda,
FeedForwardInitializer weightInit=
[](const size_t&, const size_t &, const size_t &)->float{ return 1.0-((float)(rand()%2001))/1000.0;}
);
virtual ~FeedForward();
/**
* @brief we don't want to allow network to be copied
*/
FeedForward(const FeedForward &f) = delete; //TODO
/**
* @brief we don't want to allow network to be copied
*/
FeedForward operator=(const FeedForward &f)=delete;
virtual Solution solve(const Problem& p) override;
@@ -91,10 +121,11 @@ namespace NeuronNetwork
FFLayer **ffLayers=nullptr;
float ***weights=nullptr;
float **potentials=nullptr;
float **sums=nullptr;
float **outputs=nullptr;
float **inputs=nullptr;
TransferFunction::TransferFunction **transfer=nullptr;
size_t *layerSizes=nullptr;
size_t layers;
size_t layers;/**< Number of layers */
};
}
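
The new FeedForwardInitializer parameter replaces the previously hard-coded rand()-based weights; a hedged sketch of passing a custom, seeded initializer (the include path mirrors the tests):

#include "../src/NeuralNetwork/FeedForward"
#include <cstddef>
#include <random>

int main()
{
    // Deterministic small weights instead of the default rand()-based [-1,1] range.
    std::mt19937 gen(42);
    std::uniform_real_distribution<float> dist(-0.1f, 0.1f);
    Shin::NeuralNetwork::FeedForward net({2, 8, 1}, 0.8,
        [&](const std::size_t& /*layer*/, const std::size_t& /*neuron*/, const std::size_t& /*weight*/) -> float {
            return dist(gen);
        });
    Shin::Solution s = net.solve(Shin::Problem({1, 0}));
    (void)s;
}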


@@ -1,12 +1,6 @@
#include "./BackPropagation"
#include <thread>
Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForward &n): Supervised(n)
{
}
Shin::NeuronNetwork::Learning::BackPropagation::~BackPropagation()
Shin::NeuralNetwork::Learning::BackPropagation::~BackPropagation()
{
if(deltas!=nullptr)
{
@@ -16,7 +10,7 @@ Shin::NeuronNetwork::Learning::BackPropagation::~BackPropagation()
delete[] deltas;
}
void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::NeuronNetwork::Solution& expectation)
void Shin::NeuralNetwork::Learning::BackPropagation::propagate(const Shin::Solution& expectation)
{
if(deltas==nullptr)
@@ -93,9 +87,9 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro
}
float Shin::NeuronNetwork::Learning::BackPropagation::teach(const Shin::NeuronNetwork::Problem& p, const Shin::NeuronNetwork::Solution& solution)
float Shin::NeuralNetwork::Learning::BackPropagation::teach(const Shin::Problem& p, const Shin::Solution& solution)
{
Shin::NeuronNetwork::Solution a=network.solve(p);
Shin::Solution a=network.solve(p);
double error=calculateError(solution,a);
Solution s;


@@ -2,11 +2,12 @@
#define _BACK_PROPAGATION_H_
#include <math.h>
#include <thread>
#include <cstddef>
#include "../Solution.h"
#include "../../Solution.h"
#include "../FeedForward.h"
#include "Supervised"
#include "Learning.h"
/*
* http://sydney.edu.au/engineering/it/~comp4302/ann4-3s.pdf
@@ -22,22 +23,24 @@
namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
namespace Learning
{
class BackPropagation : public Supervised
class BackPropagation : public Learning
{
public:
BackPropagation(FeedForward &n);
BackPropagation(FeedForward &n): Learning(), network(n) {}
virtual ~BackPropagation();
BackPropagation(const Shin::NeuronNetwork::Learning::BackPropagation&) =delete;
BackPropagation operator=(const Shin::NeuronNetwork::Learning::BackPropagation&) =delete;
BackPropagation(const Shin::NeuralNetwork::Learning::BackPropagation&) =delete;
BackPropagation operator=(const Shin::NeuralNetwork::Learning::BackPropagation&) =delete;
float teach(const Problem &p,const Solution &solution);
virtual void propagate(const Solution& expectation);
float teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution);
virtual void propagate(const Shin::NeuronNetwork::Solution& expectation);
protected:
FeedForward &network;
inline virtual float correction(const float& expected, const float& computed) { return expected - computed;};
float **deltas=nullptr;


@@ -0,0 +1,21 @@
#include "Learning.h"
float Shin::NeuralNetwork::Learning::Learning::calculateError(const Shin::Solution& expectation, const Shin::Solution& solution)
{
register float a=0;
for (size_t i=0;i<expectation.size();i++)
{
a+=pow(expectation[i]-solution[i],2)/2;
}
return a;
}
float Shin::NeuralNetwork::Learning::Learning::teachSet(const std::vector<std::pair<Shin::Problem,Shin::Solution>> &set)
{
double error=0;
for (register size_t i=0;i<set.size();i++)
{
error+=teach(set[i].first,set[i].second);
}
return error;
}
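
In LaTeX, calculateError and teachSet implement

E(e,s) = \frac{1}{2}\sum_i \bigl(e_i - s_i\bigr)^2, \qquad E_{\mathrm{set}} = \sum_{(p,e)\,\in\,\mathrm{set}} \mathrm{teach}(p,e)

The factor 1/2 cancels when differentiating, so \partial E/\partial s_i = s_i - e_i, the negative of the correction() term that BackPropagation propagates.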


@@ -1,30 +1,23 @@
#ifndef _SUPERVISEDLEARNING_H_
#define _SUPERVIESDLERANING_H_
#ifndef _S_NN_LEARNING_H_
#define _S_NN_LEARNING_H_
#include <vector>
#include <set>
#include <cstddef>
#include "../Solution.h"
#include "../../Solution.h"
#include "../FeedForward.h"
namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
namespace Learning
{
const float LearningCoeficient=0.4;
class Supervised
class Learning
{
public:
Supervised() =delete;
Supervised(FeedForward &n) : network(n) {};
virtual ~Supervised() {};
float calculateError(const Solution &expectation,const Solution &solution);
virtual float teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution)=0;
virtual float teachSet(const std::vector<std::pair<Problem,Solution>> &set) final;
Learning() {};
inline virtual ~Learning() {};
inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; };
@@ -35,9 +28,12 @@ namespace Learning
inline virtual void disableNoise() final {noise=0;}
inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; }
float calculateError(const Solution &expectation,const Solution &solution);
virtual float teach(const Problem &p,const Solution &solution)=0;
virtual float teachSet(const std::vector<std::pair<Problem,Solution>> &set) final;
protected:
FeedForward &network;
float learningCoeficient=Shin::NeuronNetwork::Learning::LearningCoeficient;
float learningCoeficient=LearningCoeficient;
bool allowThreads=0;
bool noise=0;
unsigned noiseSize=500;


@@ -1,6 +1,6 @@
#include "./OpticalBackPropagation"
float Shin::NeuronNetwork::Learning::OpticalBackPropagation::correction(const float& expected, const float& computed)
float Shin::NeuralNetwork::Learning::OpticalBackPropagation::correction(const float& expected, const float& computed)
{
register float tmp=(expected-computed);
register float ret=1+exp(tmp*tmp);


@@ -10,7 +10,7 @@
namespace Shin
{
namespace NeuronNetwork
namespace NeuralNetwork
{
namespace Learning
{


@@ -0,0 +1,28 @@
OBJFILES=\
FeedForward.o\
Learning/Learning.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o ../sse_mathfun.o
LINKFILES=
LIBNAME=NeuralNetwork
include ../../Makefile.const
all: lib
../sse_mathfun.o: ../sse_mathfun.cpp ../sse_mathfun.h
make -C ../
lib: $(LIBNAME).so $(LIBNAME).a
$(LIBNAME).so: $(OBJFILES)
$(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so
$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ../Solution.h ../Problem.h ./TransferFunction/TransferFunction.h ./TransferFunction/Sigmoid.h
rm -f $(LIBNAME).a # create new library
ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES)
ranlib $(LIBNAME).a
nm --demangle $(LIBNAME).a > $(LIBNAME).nm
clean:
@rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o

src/NeuralNetwork/Network.h

@@ -0,0 +1,128 @@
#ifndef _S_NN_NN_H_
#define _S_NN_NN_H_
#include <cstdarg>
#include <vector>
#include <initializer_list>
#include "../Problem.h"
#include "../Solution.h"
#include "Neuron.h"
namespace Shin
{
namespace NeuralNetwork
{
/**
* @brief Default value for lambda
*/
const float lambda=0.8;
/**
* @author Tomas Cernik (Tom.Cernik@gmail.com)
* @brief Abstract class for all Layers of neurons
*/
class Layer
{
public:
virtual ~Layer() {};
/**
* @brief This is a virtual function for selecting neuron
* @param neuron is position in layer
* @returns Specific neuron
*/
virtual Neuron& operator[](const size_t& neuron)=0;
/**
* @returns Size of layer
*/
virtual size_t size() const=0;
};
/**
* @author Tomas Cernik (Tom.Cernik@gmail.com)
* @brief Abstract model of simple Network
*/
class Network
{
public:
/**
* @brief Constructor for Network
* @param lam is the parameter for many TransferFunctions
*/
inline Network(double lam):lambda(lam) {};
/**
* @brief Virtual destructor for Network
*/
virtual ~Network() {};
/**
* @brief This is a virtual function for all networks
* @param p is a Problem to be solved
* @returns Solution of Network for Problem
*/
virtual Solution solve(const Problem&p)=0;
/**
* @brief Getter of layer
* @param layer is the position of the layer
* @returns The specified layer
*/
virtual Layer& operator[](const size_t &layer)=0;
/**
* @brief Returns the parameter for TransferFunctions
* @returns lambda (parameter for TransferFunctions)
*/
inline float getLambda() const {return lambda;}
/**
* @param t is number of threads, if set to 0 or 1 then threading is disabled
* @brief Enables or disables Threaded computing of ANN
*/
inline virtual void setThreads(const unsigned&t) final {threads=t;}
protected:
/**
* @brief Parameter for TransferFunctions
*/
float lambda;
/**
* @brief Number of threads used by network
*/
unsigned threads=1;
};
/**
* @author Tomas Cernik (Tom.Cernik@gmail.com)
* @brief Abstract class for all Acyclic networks
*/
class ACyclicNetwork : public Network
{
public:
/**
* @brief Constructor for Acyclic network
* @param lam is the parameter for many TransferFunctions
*/
inline ACyclicNetwork(double lam):Network(lam) {};
/**
* @brief Returns the size of the network in layers
* @returns Number of layers in the network
*/
virtual size_t size() const=0;
protected:
private:
};
}
}
#endif
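
A hedged sketch of driving a concrete network purely through the abstract interface above (FeedForward.h is assumed to sit beside this header, as the includes elsewhere in this commit suggest):

#include "Network.h"
#include "FeedForward.h"

// Works for any Network implementation, not just FeedForward.
Shin::Solution runThroughBase(Shin::NeuralNetwork::Network& net, const Shin::Problem& p)
{
    net.setThreads(2);                         // threading hint from the base class
    Shin::NeuralNetwork::Layer& first = net[0];
    first[1].setPotential(1.0f);               // neuron 1 of layer 0 (slot 0 is the bias)
    return net.solve(p);                       // virtual dispatch to the concrete solver
}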


@@ -0,0 +1,64 @@
#ifndef _S_NN_NEURON_H_
#define _S_NN_NEURON_H_
#include <cstdarg>
namespace Shin
{
namespace NeuralNetwork
{
/**
* @author Tomas Cernik (Tom.Cernik@gmail.com)
* @brief Abstract class of neuron. All Neuron classes should derive from this one
*/
class Neuron
{
public:
/**
* @brief virtual destructor for Neuron
*/
virtual ~Neuron() {};
/**
* @brief Returns potential of neuron
*/
virtual float getPotential() const =0;
/**
* @brief Sets potential of neuron
* @param p is the new potential
*/
virtual void setPotential(const float &p) =0;
/**
* @brief Returns the weight for the w-th input neuron
* @param w is the index of the input neuron
*/
virtual float getWeight(const size_t &w) const =0;
/**
* @brief Sets weight
* @param i is the index of the input neuron
* @param p is the new weight for input neuron i
*/
virtual void setWeight(const size_t& i ,const float &p) =0;
/**
* @brief Returns output of neuron
*/
virtual float output() const =0;
/**
* @brief Returns input of neuron
*/
virtual float input() const=0;
/**
* @brief Returns value for derivation of activation function
*/
virtual float derivatedOutput() const=0;
protected:
};
}
}
#endif

View File

@@ -0,0 +1,26 @@
#ifndef _S_NN_PERCEP_H_
#define _S_NN_PERCEP_H_
#include "./FeedForward"
#include "TransferFunction/Heaviside.h"
namespace Shin
{
namespace NeuralNetwork
{
class Perceptron:public FeedForward
{
public:
Perceptron(const size_t &inputSize, const size_t &outputSize):FeedForward({inputSize,outputSize})
{
for(int i=0;i<layers;i++)
{
delete transfer[i];
transfer[i]= new TransferFunction::Heaviside(0.5);
}
};
};
}
}
#endif
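
A hedged usage sketch of the Perceptron convenience class (assuming the header above is saved as Perceptron.h in src/NeuralNetwork; with Heaviside transfer functions its outputs are hard 0/1):

#include "./Perceptron.h"
#include <iostream>

int main()
{
    Shin::NeuralNetwork::Perceptron p(2, 1);            // 2 inputs, 1 output
    Shin::Solution s = p.solve(Shin::Problem({1, 0}));
    std::cout << (s[0] > 0.5f ? "fires" : "silent") << "\n";
}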


@@ -0,0 +1,25 @@
#ifndef __TRAN_HEAVISIDE_H_
#define __TRAN_HEAVISIDE_H_
#include "./TransferFunction.h"
namespace Shin
{
namespace NeuralNetwork
{
namespace TransferFunction
{
class Heaviside: public TransferFunction
{
public:
Sigmoid(const float &lambdaP): lambda(lambdaP) {}
inline virtual float derivatedOutput(const float &input,const float &output) override { return 1.0; }
inline virtual float operator()(const float &x) override { return x>k ? 1.0f : 0.0f; };
protected:
float lambda;
};
}
}
}
#endif


@@ -0,0 +1,24 @@
#ifndef __TRAN_HYPTAN_H_
#define __TRAN_HYPTAN_H_
#include "./TransferFunction.h"
namespace Shin
{
namespace NeuralNetwork
{
namespace TransferFunction
{
class HyperbolicTangent: public TransferFunction
{
public:
HyperbolicTangent(const float& lam=1):lambda(lam) {}
inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*(1-output*output); }
inline virtual float operator()(const float &x) override { return tanh(lambda*x); };
protected:
float lambda;
};
}
}
}
#endif


@@ -0,0 +1,33 @@
#ifndef __TRAN_SIGMOID_H_
#define __TRAN_SIGMOID_H_
#include "./StreamingTransferFunction.h"
namespace Shin
{
namespace NeuralNetwork
{
namespace TransferFunction
{
class Sigmoid: public StreamingTransferFunction
{
public:
Sigmoid(const float lambdaP = 0.8): lambda(lambdaP) {}
inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*output*(1.0f-output); }
inline virtual float operator()(const float &x) override { return 1.0f / (1.0f +exp(-lambda*x) ); };
inline virtual __m128 operator()(__m128 x) override {
x=_mm_mul_ps(temporaryConstLambda,x); //-lambda*sol[k]
x=exp_ps(x); //exp(x)
x= _mm_add_ps(x,temporaryConst1); //1+exp()
x= _mm_div_ps(temporaryConst1,x);//1/....
return x;
}
protected:
float lambda;
__m128 temporaryConst1=_mm_set1_ps(1.0);
__m128 temporaryConstLambda=_mm_set1_ps(-lambda);
};
}
}
}
#endif
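
The derivatedOutput shortcut works because the sigmoid's derivative can be written in terms of its own output; in LaTeX:

\sigma(x) = \frac{1}{1 + e^{-\lambda x}}, \qquad \sigma'(x) = \lambda\,\sigma(x)\bigl(1 - \sigma(x)\bigr)

so only the cached output is needed, never the raw input, and the __m128 overload is the same formula evaluated for four neurons at once.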


@@ -0,0 +1,26 @@
#ifndef __STREAMINGTRAN_FUN_H_
#define __STREAMINGTRAN_FUN_H_
#include <xmmintrin.h>
#include "../../sse_mathfun.h"
#include "./TransferFunction.h"
namespace Shin
{
namespace NeuralNetwork
{
namespace TransferFunction
{
class StreamingTransferFunction : public TransferFunction
{
public:
virtual float derivatedOutput(const float &input,const float &output)=0;
virtual float operator()(const float &x)=0;
virtual __m128 operator()(__m128)=0; // it must be overridden to be used!
};
}
}
}
#endif

View File

@@ -0,0 +1,22 @@
#ifndef __TRAN_FUN_H_
#define __TRAN_FUN_H_
#include <math.h>
namespace Shin
{
namespace NeuralNetwork
{
namespace TransferFunction
{
class TransferFunction
{
public:
virtual ~TransferFunction() {}
virtual float derivatedOutput(const float &input,const float &output)=0;
virtual float operator()(const float &x)=0;
};
}
}
}
#endif


@@ -1,38 +0,0 @@
#include "./QLearning"
Shin::NeuronNetwork::Learning::QLearning::QLearning(size_t input, size_t size, size_t choices):fun()
{
fun.initialiseNetwork(input,size,choices);
}
Shin::NeuronNetwork::Learning::QLearning::~QLearning()
{
}
void Shin::NeuronNetwork::Learning::QLearning::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Solution, Shin::NeuronNetwork::Problem > >& p, float quality)
{
fun.learnDelayed(p,quality);
}
void Shin::NeuronNetwork::Learning::QLearning::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Problem,int > >& p, float quality)
{
fun.learnDelayed(p,quality);
}
void Shin::NeuronNetwork::Learning::QLearning::learn(Shin::NeuronNetwork::Solution& s, Shin::NeuronNetwork::Problem& p, float quality)
{
fun.learn(s,p,quality);
}
void Shin::NeuronNetwork::Learning::QLearning::learn(Shin::NeuronNetwork::Problem& s, int action, float quality)
{
fun.learn(s,action,quality);
}
int Shin::NeuronNetwork::Learning::QLearning::getChoice(Shin::NeuronNetwork::Problem& p)
{
return fun.getChoice(p);
}


@@ -1,69 +0,0 @@
#ifndef _QLEARNING_H_
#define _QLEARNING_H_
#include <cstddef>
#include <functional>
#include "BackPropagation.h"
#include "OpticalBackPropagation.h"
#include "../Problem.h"
#include "../FeedForward.h"
#include "Unsupervised.h"
#include "RL/QFunction.h"
/*
* http://www2.econ.iastate.edu/tesfatsi/RLUsersGuide.ICAC2005.pdf
* http://www.autonlab.org/tutorials/rl06.pdf
* http://www.nbu.bg/cogs/events/2000/Readings/Petrov/rltutorial.pdf
*
* http://www.applied-mathematics.net/qlearning/qlearning.html
* http://nn.cs.utexas.edu/downloads/papers/stanley.gecco02_1.pdf
*
* http://stackoverflow.com/questions/740389/good-implementations-of-reinforced-learning
*
* http://stackoverflow.com/questions/10722064/training-a-neural-network-with-reinforcement-learning
*
* http://remi.coulom.free.fr/Thesis/
* http://remi.coulom.free.fr/Publications/Thesis.pdf
*
* http://link.springer.com/article/10.1007/BF00992696
*
* http://scholar.google.cz/scholar?start=10&q=reinforcement+learning+feedforward&hl=en&as_sdt=0,5&as_vis=1
*
*/
namespace Shin
{
namespace NeuronNetwork
{
namespace Learning
{
class QLearning
{
public:
QLearning(size_t input, size_t size, size_t choices);
~QLearning();
QLearning(const QLearning&) =delete;
QLearning& operator=(const QLearning&) =delete;
void learnDelayed(std::vector<std::pair<Solution,Problem>> &p, float quality);
void learnDelayed(std::vector<std::pair<Problem,int>> &p, float quality);
void learn(Solution &s, Problem &p, float quality);
void learn(Problem &p,int action, float quality);
void learnNetwork(double maxError=0.01);
void learningCoeficient(double t);
void initialise(size_t input, size_t size,size_t choices);
int getChoice(Problem &p);
Solution getSolution(Problem &p) {return fun.getSolution(p);}
void setLearningCoeficient(double ok, double err) {fun.setLearningCoeficient(ok,err);};
void opticalBackPropagation() {fun.opticalBackPropagation();};
protected:
RL::QFunctionNetwork fun;
};
}
}
}
#endif


@@ -1 +0,0 @@
./Supervised.h


@@ -1,20 +0,0 @@
#include "./Supervised"
float Shin::NeuronNetwork::Learning::Supervised::calculateError(const Shin::NeuronNetwork::Solution& expectation, const Shin::NeuronNetwork::Solution& solution)
{
register float a=0;
for (size_t i=0;i<expectation.size();i++)
{
a+=pow(expectation[i]-solution[i],2)/2;
}
return a;
}
float Shin::NeuronNetwork::Learning::Supervised::teachSet(const std::vector<std::pair<Shin::NeuronNetwork::Problem,Shin::NeuronNetwork::Solution>> &set)
{
double error=0;
for (register size_t i=0;i<set.size();i++)
{
error+=teach(set[i].first,set[i].second);
}
return error;
}


@@ -1 +0,0 @@
./Unsupervised.h


@@ -1 +0,0 @@
#include "./Unsupervised"


@@ -1,29 +0,0 @@
#ifndef _UNSUPERVISEDLEARNING_H_
#define _UNSUPERVISEDLEARNING_H_
#include <math.h>
#include <cstddef>
#include "../Solution.h"
#include "../FeedForward.h"
namespace Shin
{
namespace NeuronNetwork
{
namespace Learning
{
class Unsupervised
{
public:
Unsupervised(FeedForward &n): network(n) {};
virtual ~Unsupervised() {};
Unsupervised() =delete;
protected:
FeedForward &network;
};
}
}
}
#endif


@@ -1,54 +0,0 @@
#ifndef _S_NN_NN_H_
#define _S_NN_NN_H_
#include "Problem"
#include "Solution"
#include "Neuron"
#include <cstdarg>
#include <vector>
#include <initializer_list>
#include <iostream>
namespace Shin
{
namespace NeuronNetwork
{
const float lambda=0.8;
class Layer
{
public:
virtual ~Layer() {};
virtual Neuron& operator[](const size_t& neuron)=0;
virtual size_t size() const=0;
};
class Network
{
public:
inline Network(double lam):lambda(lam) {};
virtual ~Network() {};
virtual Solution solve(const Problem&)=0;
virtual Layer& operator[](const size_t &layer)=0;
inline float getLambda() const {return lambda;}
inline virtual void setThreads(const unsigned&t) final {threads=t;}
protected:
float lambda;
unsigned threads=1;
};
class ACyclicNetwork : public Network
{
public:
inline ACyclicNetwork(double lam):Network(lam) {};
virtual size_t size() const=0;
protected:
private:
};
}
}
#endif


@@ -1,28 +0,0 @@
#ifndef _S_NN_NEURON_H_
#define _S_NN_NEURON_H_
#include <cstdarg>
namespace Shin
{
namespace NeuronNetwork
{
class Neuron
{
public:
Neuron() {};
virtual ~Neuron() {};
virtual float getPotential() const =0;
virtual void setPotential(const float &p) =0;
virtual float getWeight(const size_t&) const =0;
virtual void setWeight(const size_t& i,const float &p) =0;
virtual float output() const =0;
virtual float input() const=0;
virtual float derivatedOutput() const=0;
protected:
};
}
}
#endif


@@ -6,19 +6,16 @@
#include "IO.h"
namespace Shin
{
namespace NeuronNetwork
{
class Problem : public IO
{
public:
Problem(): IO() {};
Problem(std::vector<float> &p):IO(p) {};
Problem(const std::vector<float> &p):IO(p) {};
Problem(const std::initializer_list<float> &a) : IO(a) {};
protected:
private:
};
}
}
#endif


@@ -5,8 +5,6 @@
#include "IO.h"
namespace Shin
{
namespace NeuronNetwork
{
class Solution : public IO
{
@@ -19,7 +17,6 @@ namespace NeuronNetwork
inline void push_back(const float &a) {data.push_back(a);};
};
}
}
#endif


@@ -16,7 +16,7 @@ NN_TESTS= $(NN_TESTEABLE) nn-pong
ALL_TESTS=$(NN_TESTEABLE) $(GEN_TESTS)
LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuronNetwork.a
LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a
#LIBS=-lGenetics.so -lNeuronNetwork
CXXFLAGS += -I$(LIB_DIR)
@@ -30,10 +30,10 @@ test: all
@for i in $(ALL_TESTS); do echo -n ./$$i; echo -n " - "; ./$$i; echo ""; done
g-%: g-%.cpp $(LIB_DIR)/Genetics.a
$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -o $@ $< $ $(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuronNetwork.a -lm
$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -o $@ $< $ $(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a -lm
nn-%: nn-%.cpp $(LIB_DIR)/NeuronNetwork.a
$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuronNetwork.a -lm
nn-%: nn-%.cpp $(LIB_DIR)/NeuralNetwork.a
$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuralNetwork.a -lm
nn-pong: ./nn-pong.cpp $(LIB_DIR)/NeuronNetwork.a
$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuronNetwork.a -lm -lalleg -lGL


@@ -1,13 +1,12 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
//typedef Shin::NeuronNetwork::Problem X;
class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
@@ -17,18 +16,18 @@ class X: public Shin::NeuronNetwork::Problem
int main(int argc,char**)
{
srand(time(NULL));
std::vector<Shin::NeuronNetwork::Solution> s;
std::vector<Shin::Solution> s;
std::vector<X> p;
//
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({1})));
s.push_back(Shin::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<bool>({0})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<bool>({1})));
Shin::NeuronNetwork::FeedForward q({1,5000,5000,15000,2});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
Shin::NeuralNetwork::FeedForward q({1,5000,5000,15000,2});
Shin::NeuralNetwork::Learning::BackPropagation b(q);
if(argc > 1)
{
std::cerr << "THREADING\n";


@@ -1,10 +1,9 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward.h"
#include "../src/NeuralNetwork/FeedForward"
#include <iostream>
class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
protected:
std::vector<float> representation() const
@@ -15,8 +14,8 @@ class X: public Shin::NeuronNetwork::Problem
int main()
{
Shin::NeuronNetwork::FeedForward n({2,4,2});
Shin::NeuronNetwork::FeedForward nq({2,4,2});
Shin::NeuralNetwork::FeedForward n({2,4,2});
Shin::NeuralNetwork::FeedForward nq({2,4,2});
if(n[1].size() != 4)
{
std::cout << "Actual size:" << n[0].size();
@@ -34,8 +33,8 @@ int main()
std::cout << "Potential: " << n[2][0].getPotential() << "\n";
std::cout << "Potential: " << nq[2][0].getPotential() << "\n";
Shin::NeuronNetwork::Solution s =n.solve(X());
Shin::NeuronNetwork::Solution sq =nq.solve(X());
Shin::Solution s =n.solve(X());
Shin::Solution sq =nq.solve(X());
if(s.size()!=2)
{


@@ -1,11 +1,10 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(),q(a.q) {}
@@ -20,21 +19,21 @@ class X: public Shin::NeuronNetwork::Problem
int main()
{
std::vector<Shin::NeuronNetwork::Solution> s;
std::vector<Shin::Solution> s;
std::vector<X> p;
//
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({1,0})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({0,1})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({0,0})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({1})));
s.push_back(Shin::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<float>({1,1})));
Shin::NeuronNetwork::FeedForward q({2,4,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
Shin::NeuralNetwork::FeedForward q({2,4,1});
Shin::NeuralNetwork::Learning::BackPropagation b(q);
b.setLearningCoeficient(10);
for(int i=0;i<4;i++)


@@ -1,7 +1,7 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuralNetwork/FeedForward"
#include <iostream>
class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public: X(bool x,bool y):Problem() {data.push_back(x);data.push_back(y);}
};
@@ -10,7 +10,7 @@ int main()
{
srand(time(NULL));
int lm=5;
Shin::NeuronNetwork::FeedForward net({2,lm,1});
Shin::NeuralNetwork::FeedForward net({2,lm,1});
bool x=1;
int prev_err=0;
int err=0;
@@ -47,7 +47,7 @@ int main()
{
bool x= rand()%2;
bool y=rand()%2;
Shin::NeuronNetwork::Solution s =net.solve(X(x,y));
Shin::Solution s =net.solve(X(x,y));
if(s[0]!= (x xor y))
err++;
}

@@ -1,38 +1,31 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :q(a.q) {}
X(const std::vector<float> &a):q(a) {}
std::vector<float> representation() const
{
return q;
}
protected:
std::vector<float> q;
X(const X& a) :Problem(a.data) {}
X(const std::vector<float> &a):Problem(a) {}
};
int main(int argc, char**)
{
srand(time(NULL));
std::vector<Shin::NeuronNetwork::Solution> s;
std::vector<Shin::Solution> s;
std::vector<X> p;
//
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({1})));
s.push_back(Shin::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<float>({0})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({1})));
Shin::NeuronNetwork::FeedForward q({1,5000,5000,5000,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
Shin::NeuralNetwork::FeedForward q({1,5000,5000,5000,1});
Shin::NeuralNetwork::Learning::BackPropagation b(q);
if(argc >1)
{
@@ -42,6 +35,6 @@ int main(int argc, char**)
for(int i=0;i<2;i++)
{
b.teach(p[i%2],s[i%2]);
std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
std::cerr << i%2 <<". FOR: [" << p[i%2][0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
}
}
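The hunk above is the one substantive refactor among these renames: X drops its private q vector and representation() override, and instead hands its input to the Problem base class, which stores it (as the data member used by the copy constructor) and exposes it through operator[]. A sketch of a Problem subclass in the new style, assuming the base-class members shown in the diff (MyProblem is a name made up for illustration):

class MyProblem : public Shin::Problem
{
public:
    // Input goes straight to the base class, which keeps it in `data`.
    MyProblem(const std::vector<float>& in) : Problem(in) {}
    MyProblem(const MyProblem& other) : Problem(other.data) {}
};

MyProblem p(std::vector<float>({0.5f, 1.0f}));
float first = p[0];   // operator[] replaces the old representation()[0]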

@@ -1,10 +1,10 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
@@ -17,14 +17,14 @@ int main()
for (int test=0;test<2;test++)
{
Shin::NeuronNetwork::FeedForward q({2,3,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
Shin::NeuralNetwork::FeedForward q({2,3,1});
Shin::NeuralNetwork::Learning::BackPropagation b(q);
std::vector<std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution> > set;
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({0,0}),Shin::NeuronNetwork::Solution({0})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({1,0}),Shin::NeuronNetwork::Solution({1})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({1,1}),Shin::NeuronNetwork::Solution({0})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({0,1}),Shin::NeuronNetwork::Solution({1})));
std::vector<std::pair<Shin::Problem, Shin::Solution> > set;
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,0}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,0}),Shin::Solution({1})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,1}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,1}),Shin::Solution({1})));
if(test)
{
std::cerr << "Testing with entropy\n";

@@ -1,10 +1,10 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/OpticalBackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/OpticalBackPropagation"
#include <iostream>
#include <vector>
class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
@@ -16,15 +16,15 @@ int main()
srand(time(NULL));
for (int test=0;test<2;test++)
{
Shin::NeuronNetwork::FeedForward q({2,40,1});
Shin::NeuronNetwork::Learning::OpticalBackPropagation b(q);
Shin::NeuralNetwork::FeedForward q({2,40,1});
Shin::NeuralNetwork::Learning::OpticalBackPropagation b(q);
b.setLearningCoeficient(0.1);
std::vector<std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution> > set;
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({0,0}),Shin::NeuronNetwork::Solution({0})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({1,0}),Shin::NeuronNetwork::Solution({1})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({1,1}),Shin::NeuronNetwork::Solution({0})));
set.push_back(std::pair<Shin::NeuronNetwork::Problem, Shin::NeuronNetwork::Solution>(Shin::NeuronNetwork::Problem({0,1}),Shin::NeuronNetwork::Solution({1})));
std::vector<std::pair<Shin::Problem, Shin::Solution> > set;
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,0}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,0}),Shin::Solution({1})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,1}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,1}),Shin::Solution({1})));
if(test)
{
std::cerr << "Testing with entropy\n";

@@ -1,13 +1,12 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
//typedef Shin::NeuronNetwork::Problem X;
class X: public Shin::NeuronNetwork::Problem
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
@@ -17,21 +16,21 @@ class X: public Shin::NeuronNetwork::Problem
int main(int argc,char**)
{
srand(time(NULL));
std::vector<Shin::NeuronNetwork::Solution> s;
std::vector<Shin::Solution> s;
std::vector<X> p;
p.push_back(X(std::vector<float>({0,0})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.4,0.3,0.2,0.1})));
s.push_back(Shin::Solution(std::vector<float>({0.4,0.3,0.2,0.1})));
p.push_back(X(std::vector<float>({0,0.5})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.6,0.3,0.2,0.5})));
s.push_back(Shin::Solution(std::vector<float>({0.6,0.3,0.2,0.5})));
p.push_back(X(std::vector<float>({0.4,0.5})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.4,0.4,0.2,0.8})));
Shin::NeuronNetwork::FeedForward q({2,4,4,4},1.0);
Shin::NeuronNetwork::Learning::BackPropagation bp(q);
s.push_back(Shin::Solution(std::vector<float>({0.4,0.4,0.2,0.8})));
Shin::NeuralNetwork::FeedForward q({2,4,4,4},1.0);
Shin::NeuralNetwork::Learning::BackPropagation bp(q);
bp.setLearningCoeficient(0.2);
for(int i=0;i<3;i++)
{
Shin::NeuronNetwork::Solution sp =q.solve(p[i]);
Shin::Solution sp =q.solve(p[i]);
std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
}
for(int i=0;i<4;i++)
@@ -44,7 +43,7 @@ int main(int argc,char**)
std::cerr << "XXXXXXXXXXXX\n";
for(int i=0;i<3;i++)
{
Shin::NeuronNetwork::Solution sp =q.solve(p[i]);
Shin::Solution sp =q.solve(p[i]);
std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
}
}