initial cleaning

2014-12-10 19:57:54 +01:00
parent aab9a073e9
commit 5d0fa9301b
38 changed files with 906 additions and 583 deletions

View File

@@ -1 +0,0 @@
1

View File

@@ -2,16 +2,19 @@ include ../Makefile.const
OPTIMALIZATION=
LIB_DIR = ../lib
GEN_TESTS=g-01 g-02
NN_TESTS=\
#GEN_TESTS=g-01 g-02
NN_TESTEABLE=\
nn-01 nn-02 nn-03 nn-bp-sppeed \
nn-bp-xor \
nn-obp-xor \
nn-rl-xor nn-rl-and nn-rl-qfun\
nn-reinforcement nn-04
# nn-test nn-rl-qfun\
nn-rl-xor nn-rl-and nn-rl-xor2\
nn-reinforcement nn-04 \
nn-pong
ALL_TESTS=$(NN_TESTS) $(GEN_TESTS)
NN_TESTS= $(NN_TESTEABLE) nn-pong
ALL_TESTS=$(NN_TESTEABLE) $(GEN_TESTS)
LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuronNetwork.a
#LIBS=-lGenetics.so -lNeuronNetwork
@@ -22,6 +25,7 @@ all:| lib $(ALL_TESTS);
gen: $(GEN_TESTS)
test: all
@for i in $(ALL_TESTS); do echo -n ./$$i; echo -n " - "; ./$$i; echo ""; done
@@ -31,6 +35,9 @@ g-%: g-%.cpp $(LIB_DIR)/Genetics.a
nn-%: nn-%.cpp $(LIB_DIR)/NeuronNetwork.a
$(CXX) $(CXXFLAGS) -o $@ $< $(LIB_DIR)/NeuronNetwork.a -lm
nn-pong: ./nn-pong.cpp $(LIB_DIR)/NeuronNetwork.a
$(CXX) $(CXXFLAGS) -o $@ $< $(LIB_DIR)/NeuronNetwork.a -lm -lalleg -lGL
lib:
make -C ../

View File

@@ -1,5 +1,5 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include <iostream>
@@ -27,7 +27,7 @@ int main(int argc,char**)
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<bool>({1})));
Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,5000,5000,15000,2});
Shin::NeuronNetwork::FeedForward q({1,5000,5000,15000,2});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
if(argc > 1)
{

View File

@@ -1,6 +1,6 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForwardQuick.h"
#include "../src/NeuronNetwork/FeedForward.h"
#include <iostream>
@@ -15,24 +15,24 @@ class X: public Shin::NeuronNetwork::Problem
int main()
{
Shin::NeuronNetwork::FeedForwardNetwork n({2,4,2});
Shin::NeuronNetwork::FeedForwardNetworkQuick nq({2,4,2});
if(n[1]->size() != 4)
Shin::NeuronNetwork::FeedForward n({2,4,2});
Shin::NeuronNetwork::FeedForward nq({2,4,2});
if(n[1].size() != 4)
{
std::cout << "Actual size:" << n[0]->size();
std::cout << "Actual size:" << n[0].size();
return 1;
}
if(nq[1]->size() != 4)
if(nq[1].size() != 4)
{
std::cout << "QUICK Actual size:" << nq[0]->size();
std::cout << "QUICK Actual size:" << nq[0].size();
return 1;
}
n[2]->operator[](0)->setPotential(25);
nq[2]->operator[](0)->setPotential(25);
n[2][0].setPotential(25);
nq[2][0].setPotential(25);
std::cout << "Potential: " << n[2]->operator[](0)->getPotential() << "\n";
std::cout << "Potential: " << nq[2]->operator[](0)->getPotential() << "\n";
std::cout << "Potential: " << n[2][0].getPotential() << "\n";
std::cout << "Potential: " << nq[2][0].getPotential() << "\n";
Shin::NeuronNetwork::Solution s =n.solve(X());
Shin::NeuronNetwork::Solution sq =nq.solve(X());
@@ -51,8 +51,8 @@ int main()
return 1;
}
}
n[2]->operator[](0)->setWeight(0,26.0);
nq[2]->operator[](0)->setWeight(0,26.0);
n[2][0].setWeight(0,26.0);
nq[2][0].setWeight(0,26.0);
s =n.solve(X());
sq =n.solve(X());
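For reference, the accessor change behind most of this diff: the old FeedForwardNetwork / FeedForwardNetworkQuick classes handed out layers and neurons as pointers (n[2]->operator[](0)->setPotential(25)), while the unified FeedForward class indexes both by reference. A minimal sketch of the new style, using only the constructor and accessors that appear in the nn-02 test above:

    #include "../src/NeuronNetwork/FeedForward"
    #include <iostream>

    // Sketch only: assumes the operator[], getPotential, setPotential and
    // setWeight accessors shown in the nn-02 test above.
    int main()
    {
        Shin::NeuronNetwork::FeedForward net({2,4,2});   // layer sizes
        net[2][0].setPotential(25);                      // layer 2, neuron 0, by reference
        net[2][0].setWeight(0, 26.0);                    // weight of incoming connection 0
        std::cout << "Potential: " << net[2][0].getPotential() << "\n";
        return 0;
    }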

View File

@@ -1,5 +1,5 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include <iostream>
@@ -33,7 +33,7 @@ int main()
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<float>({1,1})));
Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,4,1});
Shin::NeuronNetwork::FeedForward q({2,4,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
b.setLearningCoeficient(10);
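Only fragments of the XOR back-propagation test survive in the hunk above; a condensed sketch of the whole pattern, assuming the Problem subclass, teach() and solve() calls used elsewhere in this commit (nn-bp-xor, nn-test.cpp) and a made-up epoch count:

    #include "../src/NeuronNetwork/FeedForward"
    #include "../src/NeuronNetwork/Learning/BackPropagation"
    #include <vector>

    // Sketch only: X wraps raw inputs as a Problem, as in the tests above.
    class X: public Shin::NeuronNetwork::Problem
    {
    public:
        X(const std::vector<float> &a):Problem() { data = a; }
    };

    int main()
    {
        std::vector<X> p = { X({0,0}), X({0,1}), X({1,0}), X({1,1}) };
        std::vector<Shin::NeuronNetwork::Solution> s = {
            Shin::NeuronNetwork::Solution(std::vector<float>({0})),
            Shin::NeuronNetwork::Solution(std::vector<float>({1})),
            Shin::NeuronNetwork::Solution(std::vector<float>({1})),
            Shin::NeuronNetwork::Solution(std::vector<float>({0})),
        };
        Shin::NeuronNetwork::FeedForward q({2,4,1});
        Shin::NeuronNetwork::Learning::BackPropagation b(q);
        b.setLearningCoeficient(10);                   // spelling as in the library
        for (int epoch = 0; epoch < 10000; ++epoch)    // assumed number of passes
            for (unsigned i = 0; i < p.size(); ++i)
                b.teach(p[i], s[i]);                   // teach(problem, expected solution)
        return 0;
    }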

View File

@@ -1,20 +1,16 @@
#include "../src/NeuronNetwork/Network"
#include "../src/NeuronNetwork/FeedForward"
#include <iostream>
class X: public Shin::NeuronNetwork::Problem
{
public: X(bool x,bool y):x(x),y(y) {}
protected: std::vector<float> representation() const { return std::vector<float>({x,y}); }
private:
bool x;
bool y;
public: X(bool x,bool y):Problem() {data.push_back(x);data.push_back(y);}
};
int main()
{
srand(time(NULL));
int lm=5;
Shin::NeuronNetwork::FeedForwardNetwork net({2,lm,1});
Shin::NeuronNetwork::FeedForward net({2,lm,1});
bool x=1;
int prev_err=0;
int err=0;
@@ -42,10 +38,10 @@ int main()
w=rand()%2;
if(l==2)
n=0;
pot=net[l]->operator[](n)->getPotential();
net[l]->operator[](n)->setPotential(pot*(rand()%21+90)/100);
wei=net[l]->operator[](n)->getWeight(w);
net[l]->operator[](n)->setWeight(w,wei*(rand()%21+90)/100);
pot=net[l][n].getPotential();
net[l][n].setPotential(pot*(rand()%21+90)/100);
wei=net[l][n].getWeight(w);
net[l][n].setWeight(w,wei*(rand()%21+90)/100);
for(int i=0;i<100;i++)
{
@@ -58,10 +54,9 @@ int main()
if(err > prev_err)
{
net[l]->operator[](n)->setPotential(pot);
net[l]->operator[](n)->setWeight(w,wei);
net[l][n].setPotential(pot);
net[l][n].setWeight(w,wei);
};
// std::cout << "C: " << c << " err: " << err << " prev: "<<prev_err << "\n";
prev_err=err;
if(err <1)
x=0;

View File

@@ -1,5 +1,5 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include <iostream>
@@ -31,7 +31,7 @@ int main(int argc, char**)
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({1})));
Shin::NeuronNetwork::FeedForwardNetworkQuick q({1,5000,5000,5000,1});
Shin::NeuronNetwork::FeedForward q({1,5000,5000,5000,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
if(argc >1)

View File

@@ -1,4 +1,4 @@
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include <iostream>
@@ -16,7 +16,7 @@ int main()
for (int test=0;test<2;test++)
{
Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,3,1});
Shin::NeuronNetwork::FeedForward q({2,3,1});
Shin::NeuronNetwork::Learning::BackPropagation b(q);
srand(time(NULL));
@@ -67,5 +67,13 @@ int main()
if(err <0.001)
break;
}
for(auto a:p)
{
delete a;
}
for(auto a:s)
{
delete a;
}
}
}

View File

@@ -1,4 +1,4 @@
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/OpticalBackPropagation"
#include <iostream>
@@ -16,7 +16,7 @@ int main()
for (int test=0;test<2;test++)
{
Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,40,1});
Shin::NeuronNetwork::FeedForward q({2,40,1});
Shin::NeuronNetwork::Learning::OpticalBackPropagation b(q);
srand(time(NULL));

tests/nn-pong.cpp (Normal file, 344 lines)
View File

@@ -0,0 +1,344 @@
#include <allegro.h>
#include <cstdlib>
#include <time.h>
#include "../src/NeuronNetwork/Learning/QLearning.h"
#include <sys/time.h>
int learningGames=6000;
int ball_x = 320;
int ball_y = 240;
int ball_tempX = 320;
int ball_tempY = 240;
int p1_x = 20;
int p1_y = 210;
int p1_tempX = 20;
int p1_tempY = 210;
int p2_x = 620;
int p2_y = 210;
int p2_tempX = 620;
int p2_tempY = 210;
int i=0;
long game=0;
int q=0;
int speed=1;
bool randomLearner=0;
int dir; //This will keep track of the circles direction
//1= up and left, 2 = down and left, 3= up and right, 4 = down and right
BITMAP *buffer; //This will be our temporary bitmap for double buffering
class X: public Shin::NeuronNetwork::Problem
{
public:
X(int p1,int ballX,int ballY,int p2)//, int ballY)
{
data.push_back((float)p1/480.0);
data.push_back((float)ballX/640.0);
data.push_back((float)ballY/480.0);
}
};
Shin::NeuronNetwork::Learning::QLearning l(3,15,3);
std::vector <std::pair<Shin::NeuronNetwork::Problem,int>> p1x;
void propagateOKtoP1(double quality=10)
{
l.learnDelayed(p1x,quality);
p1x.clear();
}
void moveBall(){
ball_tempX = ball_x;
ball_tempY = ball_y;
if (dir == 1 && ball_x > 5 && ball_y > 5){
if( ball_x == p1_x + 15 && ball_y >= p1_y && ball_y <= p1_y + 60){
dir = rand()% 2 + 3;
propagateOKtoP1(100);
}else{
--ball_x;
--ball_y;
}
} else if (dir == 2 && ball_x > 5 && ball_y < 475){
if( ball_x == p1_x + 15 && ball_y >= p1_y && ball_y <= p1_y + 60){
dir = rand()% 2 + 3;
propagateOKtoP1(100);
}else{
--ball_x;
++ball_y;
}
} else if (dir == 3 && ball_x < 635 && ball_y > 5){
if( ball_x + 5 == p2_x && ball_y >= p2_y && ball_y <= p2_y + 60){
dir = rand()% 2 + 1;
}else{
++ball_x;
--ball_y;
}
} else if (dir == 4 && ball_x < 635 && ball_y < 475){
if( ball_x + 5 == p2_x && ball_y >= p2_y && ball_y <= p2_y + 60){
dir = rand()% 2 + 1;
}else{
++ball_x;
++ball_y;
}
} else {
if (dir == 1 || dir == 3) ++dir;
else if (dir == 2 || dir == 4) --dir;
}
}
char p1Move(){
X p=X(p1_y,ball_x,ball_y,p2_y);
if(game <learningGames)
{
if(randomLearner)
{
register int tmp=game%3;
if(rand()%5==0)
{
tmp=(tmp+rand())%3;
}
if(tmp==1)
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,2));//,ball_tempX,ball_tempY));
return 1;
}else if(tmp==0)
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,0));//,ball_tempX,ball_tempY));
return -1;
}else
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,1));//,ball_tempX,ball_tempY));
return 0;
}
}else
{
if( p1_tempY > ball_y && p1_y > 0){
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,0));//,ball_tempX,ball_tempY));
return -1;
} else if( p1_tempY < ball_y && p1_y < 420){
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,2));//,ball_tempX,ball_tempY));
return 1;
}else
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,1));//,ball_tempX,ball_tempY));
return 0;
}
}
}
int j=l.getChoice(p);
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,j));//,ball_tempX,ball_tempY));
return j-1;
}
char p2Move(){
if(game >= learningGames)
{
if(key[KEY_UP])
return 1;
else if( key[KEY_DOWN])
return -1;
else
return 0;
}else
{
if(rand()%10==0)
{
return (rand()%3)-1;
}
if( p2_tempY > ball_y){
return -1;
} else if( p2_tempY < ball_y){
return 1;
}
return 0;
}
}
void startNew(){
clear_keybuf();
if(game==learningGames)
textout_ex( screen, font, "Player 1 learned! Push a button to start a game.", 160, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
if(game >= learningGames)
readkey();
clear_to_color( buffer, makecol( 0, 0, 0));
ball_x = 350;
ball_y = rand()%481;
p1_x = 20;
p1_y = 210;
p2_x = 620;
p2_y = 210;
}
void checkWin(){
int won=0;
if ( ball_x < p1_x){
won=1;
game++;
textout_ex( screen, font, "Player 2 Wins!", 320, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
propagateOKtoP1(-100);
startNew();
} else if ( ball_x > p2_x){
game++;
won=1;
textout_ex( screen, font, "Player 1 Wins!", 320, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
propagateOKtoP1(100);
startNew();
}
}
void setupGame(){
acquire_screen();
rectfill( buffer, p1_x, p1_y, p1_x + 10, p1_y + 60, makecol ( 0, 0, 255));
rectfill( buffer, p2_x, p2_y, p2_x + 10, p2_y + 60, makecol ( 0, 0, 255));
circlefill ( buffer, ball_x, ball_y, 5, makecol( 128, 255, 0));
draw_sprite( screen, buffer, 0, 0);
release_screen();
srand( time(NULL));
dir = rand() % 4 + 1;
}
int main(int argc, char**argv)
{
allegro_init();
install_keyboard();
set_color_depth(16);
set_gfx_mode( GFX_AUTODETECT_WINDOWED, 640, 480, 0, 0);
l.setLearningCoeficient(0.01,0.01);
if(argc>=4 && argv[3][0]=='o')
{
std::cerr << "USING Optical Backpropagation\n";
l.opticalBackPropagation();
}
if(argc>=3)
{
std::cerr << "Setting learning coefficients to:" << atof(argv[1]) << "," << atof(argv[2]) << "\n";
l.setLearningCoeficient(atof(argv[1]),atof(argv[2]));
}
if(argc >=5)
{
std::cerr << "Setting learning games to:" << atof(argv[4]) << "\n";
learningGames=atof(argv[4]);
}
if(argc >=6 && argv[5][0]=='r')
{
std::cerr << "Setting random learning\n";
randomLearner=1;
}
buffer = create_bitmap( 640, 480);
setupGame();
speed=51;
int sleepTime=1000;
while(!key[KEY_ESC])
{
q++;
if(key[KEY_T])
{
std::cout << "ADDING next 500 learning games\n";
usleep(500000);
learningGames+=500;
}
if(game < learningGames)
{
if( key[KEY_UP] && speed < 200){
speed+=5;
}else if( key[KEY_DOWN] && speed >1 ){
speed-=5;
}
if(speed <= 0)
{
speed=1;
}
}else
{
speed=1;
}
register char p1dir=p1Move();
register char p2dir=p2Move();
p1_tempY = p1_y;
p2_tempY = p2_y;
if(p1dir < 0 && p1_y > 0){
--p1_y;
} else if( p1dir > 0 && p1_y < 420){
++p1_y;
}
if(p2dir > 0 && p2_y > 0){
--p2_y;
} else if( p2dir < 0 && p2_y < 420){
++p2_y;
}
moveBall();
if(key[KEY_PLUS_PAD] && sleepTime >=10)
sleepTime-=50;
else if(key[KEY_MINUS_PAD] && sleepTime <=15000)
sleepTime+=50;
if(i%speed==0)
{
acquire_screen();
rectfill( buffer, p1_tempX, p1_tempY, p1_tempX + 10, p1_tempY + 60, makecol ( 0, 0, 0));
rectfill( buffer, p1_x, p1_y, p1_x + 10, p1_y + 60, makecol ( 0, 0, 255));
rectfill( buffer, p2_tempX, p2_tempY, p2_tempX + 10, p2_tempY + 60, makecol ( 0, 0, 0));
rectfill( buffer, p2_x, p2_y, p2_x + 10, p2_y + 60, makecol ( 0, 0, 255));
circlefill ( buffer, ball_tempX, ball_tempY, 5, makecol( 0, 0, 0));
circlefill ( buffer, ball_x, ball_y, 5, makecol( 128, 255, 0));
draw_sprite( screen, buffer, 0, 0);
release_screen();
usleep(sleepTime);
}
checkWin();
i++;
}
return 0;
}
END_OF_MAIN()
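The pong demo above exercises the delayed-reward path of QLearning: each frame it records a (state, chosen action) pair, and only when the rally ends does propagateOKtoP1() push a single quality value back through learnDelayed(). A stripped-down sketch of that pattern, using only the QLearning calls visible in nn-pong.cpp; the State class, loop length and constant reward are placeholders:

    #include "../src/NeuronNetwork/Learning/QLearning.h"
    #include <utility>
    #include <vector>

    // Sketch only: a Problem with three normalized inputs, as in class X above.
    class State: public Shin::NeuronNetwork::Problem
    {
    public:
        State(float a, float b, float c):Problem()
        { data.push_back(a); data.push_back(b); data.push_back(c); }
    };

    int main()
    {
        Shin::NeuronNetwork::Learning::QLearning l(3, 15, 3); // presumably 3 inputs, 15 hidden, 3 actions
        l.setLearningCoeficient(0.01, 0.01);
        std::vector<std::pair<Shin::NeuronNetwork::Problem,int>> episode;
        for (int frame = 0; frame < 100; ++frame)             // one episode
        {
            State s(0.5f, 0.5f, 0.5f);                        // placeholder state
            int action = l.getChoice(s);                      // pick one of the 3 actions
            episode.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(s, action));
        }
        l.learnDelayed(episode, 100);                         // reward the whole episode at once
        episode.clear();
        return 0;
    }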

View File

@@ -1,4 +1,4 @@
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement.h"
#include "../src/NeuronNetwork/Solution.h"
@@ -28,7 +28,7 @@ int main()
p.push_back(X(std::vector<float>({1,1})));
Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,6,2});
Shin::NeuronNetwork::FeedForward q({2,6,2});
Shin::NeuronNetwork::Learning::Reinforcement b(q);
b.getPropagator().setLearningCoeficient(1);
int i=0;

View File

@@ -1,4 +1,4 @@
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement.h"
#include "../src/NeuronNetwork/Solution.h"
@@ -25,7 +25,7 @@ int main()
p.push_back(new X(std::vector<float>({1,0})));
p.push_back(new X(std::vector<float>({0,1})));
Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,1});
Shin::NeuronNetwork::FeedForward q({2,1});
Shin::NeuronNetwork::Learning::Reinforcement b(q);
int i=0;
double targetQuality=0.5;

View File

@@ -1,4 +1,4 @@
#include "../src/NeuronNetwork/FeedForwardQuick"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement"
#include "../src/NeuronNetwork/Learning/OpticalBackPropagation"
@@ -19,7 +19,7 @@ int main()
srand(time(NULL));
for (int test=0;test<3;test++)
{
Shin::NeuronNetwork::FeedForwardNetworkQuick q({2,4,1});
Shin::NeuronNetwork::FeedForward q({2,4,1});
Shin::NeuronNetwork::Learning::Reinforcement b(q);
//b.setPropagator(new Shin::NeuronNetwork::Learning::OpticalBackPropagation(q));
b.getPropagator().setLearningCoeficient(0.4);

tests/nn-rl-xor2.cpp (Normal file, 99 lines)
View File

@@ -0,0 +1,99 @@
#include "../src/NeuronNetwork/Learning/QLearning.h"
#include <iostream>
#include <vector>
class X: public Shin::NeuronNetwork::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<float> &a):Problem() {data=a;}
};
float atof(char *s)
{
int f, m, sign, d=1;
f = m = 0;
sign = (s[0] == '-') ? -1 : 1;
if (s[0] == '-' || s[0] == '+') s++;
for (; *s != '.' && *s; s++) {
f = (*s-'0') + f*10;
}
if (*s == '.')
for (++s; *s; s++) {
m = (*s-'0') + m*10;
d *= 10;
}
return sign*(f + (float)m/d);
}
float AA=10;
float getQuality(X& p, int action)
{
if((p[0]==0&& p[1]==0) ||(p[0]==1&& p[1]==1)) //should be 0
{
return action==1?-AA:AA;
}else // should be 1
{
return action==0?-AA:AA;
}
}
int main(int argc, char **argv)
{
srand(time(NULL));
Shin::NeuronNetwork::Learning::QLearning l(2,45,2);
if(argc==4 && argv[3][0]=='o')
{
std::cerr << "USING Optical Backpropagation\n";
l.opticalBackPropagation();
}
if(argc>=3)
{
std::cerr << "Setting learning coefficients to:" << atof(argv[1]) << "," << atof(argv[2]) << "\n";
l.setLearningCoeficient(atof(argv[1]),atof(argv[2]));
}
std::vector <std::pair<Shin::NeuronNetwork::Solution,Shin::NeuronNetwork::Problem>> p1x;
std::vector <X> states;
states.push_back(X(std::vector<float>({1,0})));
states.push_back(X(std::vector<float>({0,0})));
states.push_back(X(std::vector<float>({1,1})));
states.push_back(X(std::vector<float>({0,1})));
unsigned long step=0;
double quality=0;
while(step< 600000 && quality < (3.9*AA))
{
quality=0;
if(step%10000==0)
std::cerr << "STEP " << step << "\n";
for(unsigned i=0;i<states.size();i++)
{
int choice=l.getChoice(states[i]);
l.learn(states[i],choice,quality);
}
for(unsigned i=0;i<states.size();i++)
{
int choice=l.getChoice(states[i]);
quality+=getQuality(states[i],choice);
if(step%10000==0)
{
Shin::NeuronNetwork::Solution sol=l.getSolution(states[i]);
std::cerr << "\tState: [" << states[i][0] << "," << states[i][1] << "] Q-function: [" << sol[0] << "," <<sol[1] << "] Action " << choice << "\n";
}
}
step++;
}
std::cerr << step << "\n";
for(unsigned i=0;i<states.size();i++)
{
Shin::NeuronNetwork::Solution sol=l.getSolution(states[i]);
int choice=l.getChoice(states[i]);
std::cerr << "State: [" << states[i][0] << "," << states[i][1] << "] Q-function: [" << sol[0] << "," <<sol[1] << "] Action " << choice << "\n";
}
}

tests/nn-test.cpp (Normal file, 50 lines)
View File

@@ -0,0 +1,50 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
//typedef Shin::NeuronNetwork::Problem X;
class X: public Shin::NeuronNetwork::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<float> &a):Problem() {for(auto q:a){ data.push_back(q);}}
protected:
};
int main(int argc,char**)
{
srand(time(NULL));
std::vector<Shin::NeuronNetwork::Solution> s;
std::vector<X> p;
p.push_back(X(std::vector<float>({0,0})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.4,0.3,0.2,0.1})));
p.push_back(X(std::vector<float>({0,0.5})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.6,0.3,0.2,0.5})));
p.push_back(X(std::vector<float>({0.4,0.5})));
s.push_back(Shin::NeuronNetwork::Solution(std::vector<float>({0.4,0.4,0.2,0.8})));
Shin::NeuronNetwork::FeedForward q({2,4,4,4},1.0);
Shin::NeuronNetwork::Learning::BackPropagation bp(q);
bp.setLearningCoeficient(0.2);
for(int i=0;i<3;i++)
{
Shin::NeuronNetwork::Solution sp =q.solve(p[i]);
std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
}
for(int i=0;i<4;i++)
{
for(int j=0;j<3;j++)
{
bp.teach(p[j],s[j]);
}
}
std::cerr << "XXXXXXXXXXXX\n";
for(int i=0;i<3;i++)
{
Shin::NeuronNetwork::Solution sp =q.solve(p[i]);
std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
}
}