Refactored and recurrent implementation

This commit is contained in:
2016-01-22 13:21:34 +01:00
parent e61e616227
commit d424d87535
65 changed files with 12102 additions and 2361 deletions

View File

@@ -1,45 +1,27 @@
include ../Makefile.const
OPTIMALIZATION=
LIB_DIR = ../lib
#GEN_TESTS=g-01 g-02
NN_TESTEABLE=\
nn-01 nn-02 nn-03 nn-bp-sppeed \
nn-bp-xor \
nn-obp-xor \
nn-rl-xor nn-rl-and nn-rl-xor2\
nn-reinforcement nn-04 \
nn-pong
ALL_TESTS=activation basis recurrent
NN_TESTS= $(NN_TESTEABLE) nn-pong
ALL_TESTS=$(NN_TESTEABLE) $(GEN_TESTS)
LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a
#LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a
#LIBS=-lGenetics.so -lNeuronNetwork
CXXFLAGS += -I$(LIB_DIR)
all:| lib $(ALL_TESTS);
gen: $(GEN_TESTS)
all:$(ALL_TESTS);
test: all
@for i in $(ALL_TESTS); do echo -n ./$$i; echo -n " - "; ./$$i; echo ""; done
g-%: g-%.cpp $(LIB_DIR)/Genetics.a
$(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -o $@ $< $ $(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a -lm
../src/NeuralNetwork.so: lib
nn-%: nn-%.cpp $(LIB_DIR)/NeuralNetwork.a
$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuralNetwork.a -lm
nn-pong: ./nn-pong.cpp $(LIB_DIR)/NeuronNetwork.a
$(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuronNetwork.a -lm -lalleg -lGL
%: %.cpp ../src/NeuralNetwork.so | lib %.cpp ../src/NeuralNetwork.so
$(CXX) $(CXXFLAGS) -I../include -o $@ $< $ -lm ../src/NeuralNetwork.so -msse4.2 -DHAVE_VECLIB
lib:
make -C ../
@make -C ../
clean:
@for i in $(ALL_TESTS);do rm -f $$i;done;
@for i in $(ALL_TESTS);do rm -f $$i;done;

59
tests/activation.cpp Normal file
View File

@@ -0,0 +1,59 @@
#include <NeuralNetwork/ActivationFunction/Heaviside.h>
#include <NeuralNetwork/ActivationFunction/Sigmoid.h>
#include <NeuralNetwork/ActivationFunction/HyperbolicTangent.h>
#include <iostream>
#include <cassert>
#include <chrono>
// Scratch union that views the same 128 bits either as an SSE vector or as
// four scalar floats, so the vectorised Sigmoid overload below can be checked
// lane-by-lane. NOTE(review): union type punning is well-defined in C but
// technically UB in strict C++; GCC/Clang accept it as an extension — confirm
// this matches the project's supported compilers.
union {
__m128 v; // SSE 4 x float vector
float a[4]; // scalar array of 4 floats
} U;
// Unit test for the activation functors:
//  - Heaviside: step function; the constructor argument is presumably the
//    threshold at which the output switches from 0 to 1 (consistent with the
//    assertions below) — TODO confirm against the header.
//  - Sigmoid: tested both through the scalar operator() and the __m128
//    vector overload, whose per-lane results must fall in the same windows
//    as the scalar calls.
// Exact == comparisons are used only on Heaviside (0/1 outputs); Sigmoid
// results are bracketed with < / > windows to tolerate float rounding.
// Prints "OK" and returns 0 on success; a failed assert aborts the process.
int main() {
{
// Threshold 1.0: below stays 0, above becomes 1.
NeuralNetwork::ActivationFunction::Heaviside h(1.0);
assert(h(0.2) == 0);
assert(h(1.2) == 1);
}
{
// Threshold 0.7: the switch point moves with the constructor argument.
NeuralNetwork::ActivationFunction::Heaviside h(0.7);
assert(h(0.2) == 0);
assert(h(0.8) == 1);
}
{
// Sigmoid with steepness/lambda 0.7 (presumed meaning of the ctor
// argument — TODO confirm): spot-check two points with tolerance windows.
NeuralNetwork::ActivationFunction::Sigmoid s(0.7);
assert(s(0.1) > 0.517483);
assert(s(0.1) < 0.51750);
assert(s(10) > 0.998989);
assert(s(10) < 0.999189);
}
{
// Steeper sigmoid: same inputs map closer to the asymptotes.
NeuralNetwork::ActivationFunction::Sigmoid s(5);
assert(s(0.1) > 0.622359);
assert(s(0.1) < 0.622559);
assert(s(0.7) > 0.970588);
assert(s(0.7) < 0.970788);
}
{
// Vectorised path: load two lanes of the scratch union, apply the
// __m128 overload, and require the same windows as the scalar calls
// above (lanes 2 and 3 are left uninitialised and unchecked).
NeuralNetwork::ActivationFunction::Sigmoid s(0.7);
U.a[0]=0.1;
U.a[1]=10;
U.v=s(U.v);
assert(U.a[0] > 0.517483);
assert(U.a[0] < 0.51750);
assert(U.a[1] > 0.998989);
assert(U.a[1] < 0.999189);
}
std::cout << "OK" << std::endl;
return 0;
}

66
tests/basis.cpp Normal file
View File

@@ -0,0 +1,66 @@
#include <NeuralNetwork/BasisFunction/Linear.h>
#include <iostream>
#include <cassert>
#include <chrono>
// Unit test for BasisFunction::Linear: verifies that compute() (plain C++
// path) and computeStreaming() (SSE path, judging by the commented-out
// benchmark labels below) agree and produce the expected weighted sums.
// All inputs use the vector as both weights and inputs, so the expected
// value is the sum of squares (e.g. 1+4+9+25 = 39).
// The vector sizes are chosen to exercise remainder handling in the
// streaming path: 4 (exact SSE width), 5 and 55 (non-multiples of 4), and
// 100 (many full vector iterations).
// Prints "OK" on success; a failed assert aborts.
int main() {
{
// 4 elements: 1+4+9+25 = 39, both code paths.
NeuralNetwork::BasisFunction::Linear l;
assert(39.0==l.compute({1,2,3,5},{1,2,3,5}));
assert(39.0==l.computeStreaming({1,2,3,5},{1,2,3,5}));
}
{
// 5 elements (one past the SSE width): 39+49 = 88.
NeuralNetwork::BasisFunction::Linear l;
assert(88.0==l.computeStreaming({1,2,3,5,7},{1,2,3,5,7}));
assert(88.0==l.compute({1,2,3,5,7},{1,2,3,5,7}));
}
{
// 100 elements of 2: 100 * 2*2 = 400.
NeuralNetwork::BasisFunction::Linear l;
std::vector<float> w;
for(int in=0;in<100;in++) {
w.push_back(2);
}
assert(400.0==l.computeStreaming(w,w));
assert(400.0==l.compute(w,w));
}
{
// 55 elements of 2 (non-multiple of 4): 55 * 4 = 220.
NeuralNetwork::BasisFunction::Linear l;
std::vector<float> w;
for(int in=0;in<55;in++) {
w.push_back(2);
}
assert(220.0==l.computeStreaming(w,w));
assert(220.0==l.compute(w,w));
}
// Disabled micro-benchmark comparing the two implementations; kept for
// manual use. Re-enable locally if the relative speed needs checking.
/*
std::vector<float> w;
std::vector<float> i;
for(int in=0;in<100000;in++) {
w.push_back(2);
i.push_back(2);
}
NeuralNetwork::BasisFunction::Linear l;
{
auto start = std::chrono::high_resolution_clock::now();
for(int in=0;in<1000;in++) {
l.compute(w,i);
}
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end-start;
std::cout << "C++ :" << diff.count() << " s\n";
}
{
auto start = std::chrono::high_resolution_clock::now();
for(int in=0;in<1000;in++) {
l.computeStreaming(w,i);
}
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end-start;
std::cout << "SSE :" << diff.count() << " s\n";
}
*/
std::cout <<"OK" << std::endl;
}

View File

@@ -1,47 +0,0 @@
#include "../src/NeuralNetwork/FeedForward.h"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
//typedef Shin::NeuronNetwork::Problem X;
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<bool> &a):Problem() { for (bool s:a) data.push_back((float)s);}
protected:
};
int main(int argc,char**)
{
srand(time(NULL));
std::vector<Shin::Solution> s;
std::vector<X> p;
//
s.push_back(Shin::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<bool>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<bool>({1})));
NeuralNetwork::FeedForward q({1,5000,5000,15000,2});
if(argc > 1)
{
std::cerr << "THREADING\n";
q.setThreads(2);
}
#include <chrono>
auto t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<1000;i++)
{
//b.teach(p[i%2],s[i%2]);
q.solve(p[i%2])[0];
//std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
}
auto t2 = std::chrono::high_resolution_clock::now();
std::cout << "Time: " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count() << std::endl;
}

View File

@@ -1,95 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include <iostream>
class X: public Shin::Problem
{
protected:
std::vector<float> representation() const
{
return std::vector<float>({1,1});
}
};
class X1: public Shin::Problem
{
protected:
std::vector<float> representation() const
{
return std::vector<float>({1});
}
};
int main()
{
NeuralNetwork::FeedForward n({2,4,2});
NeuralNetwork::FeedForward nq({2,4,2});
if(n[2].size() != 4)
{
std::cout << "1) Actual size:" << n[1].size();
return 1;
}
if(nq[1].size() != 4)
{
std::cout << "QUICK Actual size:" << nq[1].size();
return 1;
}
n[2][0].setPotential(25);
nq[2][0].setPotential(25);
std::cout << "Potential: " << n[2][0].getPotential() << "\n";
std::cout << "Potential: " << nq[2][0].getPotential() << "\n";
Shin::Solution s =n.solve(X());
Shin::Solution sq =nq.solve(X());
if(s.size()!=2)
{
std::cout << "1";
return 1;
}
for(int i=0;i<2;i++)
{
if(s[i]!=sq[i])
{
std::cout << " 4 - " << i << " expected "<<s[i] << " was " <<sq[i];
return 1;
}
}
n[2][0].setWeight(0,26.0);
nq[2][0].setWeight(0,26.0);
s =n.solve(X());
sq =n.solve(X());
if(s.size()!=2)
{
std::cout << "a1";
return 1;
}
if(s[0]!=1)
{
std::cout << "a2";
return 1;
}
if(s[1]!=1)
{
std::cout << "a3";
return 1;
}
for(int i=0;i<2;i++)
{
if(s[i]!=sq[i])
{
std::cout << " a4 - " << i << " expected "<<s[i] << " was " <<sq[i];
return 1;
}
}
return 0;
}

View File

@@ -1,58 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(),q(a.q) {}
X(const std::vector<float> &a):q(a) {}
std::vector<float> representation() const
{
return q;
}
protected:
std::vector<float> q;
};
int main()
{
std::vector<Shin::Solution> s;
std::vector<X> p;
//
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({1,0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({0,1})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({0,0})));
s.push_back(Shin::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<float>({1,1})));
Shin::NeuralNetwork::FeedForward q({2,4,1});
Shin::NeuralNetwork::Learning::BackPropagation b(q);
b.setLearningCoeficient(10);
for(int i=0;i<4;i++)
{
b.teach(p[i%4],s[i%4]);
std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[1] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
s[i%4][0]<<"\n";
}
for(int i=0;i<40000;i++)
{
b.teach(p[i%4],s[i%4]);
}
std::cerr << "LEARNED\n";
for(int i=0;i<4;i++)
{
b.teach(p[i%4],s[i%4]);
std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[1] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
s[i%4][0]<<"\n";
}
}

View File

@@ -1,69 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include <iostream>
class X: public Shin::Problem
{
public: X(bool x,bool y):Problem() {data.push_back(x);data.push_back(y);}
};
int main()
{
srand(time(NULL));
int lm=5;
Shin::NeuralNetwork::FeedForward net({2,lm,1});
bool x=1;
int prev_err=0;
int err=0;
int l;
int n;
int w;
int pot;
int wei;
int c=0;
std::cout << "\ntest 1 & 1 -" << net.solve(X(1,1))[0];
std::cout << "\ntest 1 & 0 -" << net.solve(X(1,0))[0];
std::cout << "\ntest 0 & 1 - " << net.solve(X(0,1))[0];
std::cout << "\ntest 0 & 0- " << net.solve(X(0,0))[0];
std::cout << "\n---------------------------------------";
do{
if(c%10000 ==1)
{
std::cout << "\nmixed";
srand(time(NULL));
}
err=0;
c++;
l=rand()%2+1;
n=rand()%lm;
w=rand()%2;
if(l==2)
n=0;
pot=net[l][n].getPotential();
net[l][n].setPotential(pot*(rand()%21+90)/100);
wei=net[l][n].getWeight(w);
net[l][n].setWeight(w,wei*(rand()%21+90)/100);
for(int i=0;i<100;i++)
{
bool x= rand()%2;
bool y=rand()%2;
Shin::Solution s =net.solve(X(x,y));
if(s[0]!= (x xor y))
err++;
}
if(err > prev_err)
{
net[l][n].setPotential(pot);
net[l][n].setWeight(w,wei);
};
prev_err=err;
if(err <1)
x=0;
}while(x);
std::cout << "\ntest 1 & 1 -" << net.solve(X(1,1))[0];
std::cout << "\ntest 1 & 0 -" << net.solve(X(1,0))[0];
std::cout << "\ntest 0 & 1 - " << net.solve(X(0,1))[0];
std::cout << "\ntest 0 & 0- " << net.solve(X(0,0))[0];
std::cout << "\nTotaly: " << c << "\n";
}

View File

@@ -1,71 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include <iostream>
#include <stdlib.h>
int main()
{
srand(time(NULL));
NeuralNetwork::FeedForward ns({1,1});
ns[1][0].setWeight(-1,0);
ns[1][0].setWeight(0,1);
Shin::Solution ss =ns.solve(Shin::Problem({1}));
if(ss[0] < 0.689874481 || ss[0] > 0.69)
{
std::cout << "1) Wrong counter: shoul be 0.626961, is: " << ss[0];
return 1;
}
NeuralNetwork::FeedForward xorF({2,2,1},0.8);
xorF[1][0].setWeight(-1,-6.06);
xorF[1][0].setWeight(0,-11.62);
xorF[1][0].setWeight(1,10.99);
xorF[1][1].setWeight(-1,-7.19);
xorF[1][1].setWeight(0,12.88);
xorF[1][1].setWeight(1,-13-13);
xorF[2][0].setWeight(-1,-6.56);
xorF[2][0].setWeight(0,13.34);
xorF[2][0].setWeight(1,-7.19);
ss= xorF.solve(Shin::Problem({0,1}));
if(ss[0] > 1 || ss[0] < 0.98 )
{
std::cout << "2) wrong output "<< ss[0] << "\n";
return 1;
}
ss= xorF.solve(Shin::Problem({0,1}));
if(ss[0] > 1 || ss[0] < 0.98 )
{
std::cout << "3) wrong output "<< ss[0];
return 1;
}
ss= xorF.solve(Shin::Problem({0,0}));
if(ss[0] <0 || ss[0] > 0.06 )
{
std::cout << "4) wrong output "<< ss[0] ;
return 1;
}
ss= xorF.solve(Shin::Problem({1,1}));
if(ss[0] <0 || ss[0] > 0.06 )
{
std::cout << "5) wrong output "<< ss[0];
return 1;
}
return 0;
}

View File

@@ -1,40 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a.data) {}
X(const std::vector<float> &a):Problem(a) {}
};
int main(int argc, char**)
{
srand(time(NULL));
std::vector<Shin::Solution> s;
std::vector<X> p;
//
s.push_back(Shin::Solution(std::vector<float>({1})));
p.push_back(X(std::vector<float>({0})));
s.push_back(Shin::Solution(std::vector<float>({0})));
p.push_back(X(std::vector<float>({1})));
Shin::NeuralNetwork::FeedForward q({1,5000,5000,5000,1});
Shin::NeuralNetwork::Learning::BackPropagation b(q);
if(argc >1)
{
std::cerr << "Allowing threadnig\n";
b.allowThreading();
}
for(int i=0;i<2;i++)
{
b.teach(p[i%2],s[i%2]);
std::cerr << i%2 <<". FOR: [" << p[i%2][0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
}
}

View File

@@ -1,61 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<float> &a):Problem() {data=a;}
};
int main()
{
srand(time(NULL));
for (int test=0;test<2;test++)
{
Shin::NeuralNetwork::FeedForward q({2,3,1});
Shin::NeuralNetwork::Learning::BackPropagation b(q);
std::vector<std::pair<Shin::Problem, Shin::Solution> > set;
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,0}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,0}),Shin::Solution({1})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,1}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,1}),Shin::Solution({1})));
if(test)
{
std::cerr << "Testing with entropy\n";
b.allowNoise();
}else
{
std::cerr << "Testing without entropy\n";
}
b.setLearningCoeficient(20);//8);
for(int j=0;;j++)
{
double err=b.teachSet(set);
if(err <0.3)
{
// b.setLearningCoeficient(5);
}
if(err <0.1)
{
// b.setLearningCoeficient(0.2);
}
if(err <0.001)
{
std::cerr << j << "(" << err <<"):\n";
for(int i=0;i<4;i++)
{
std::cerr << "\t" << i%4 <<". FOR: [" << set[i%4].first[0] << "," <<set[i%4].first[1] << "] res: " <<
q.solve(set[i%4].first)[0] << " should be " << set[i%4].second[0]<<"\n";
}
}
if(err <0.001)
break;
}
}
}

View File

@@ -1,60 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/OpticalBackPropagation"
#include <iostream>
#include <vector>
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<float> &a):Problem() {data=a;}
};
int main()
{
srand(time(NULL));
for (int test=0;test<2;test++)
{
Shin::NeuralNetwork::FeedForward q({2,40,1});
Shin::NeuralNetwork::Learning::OpticalBackPropagation b(q);
b.setLearningCoeficient(0.1);
std::vector<std::pair<Shin::Problem, Shin::Solution> > set;
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,0}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,0}),Shin::Solution({1})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({1,1}),Shin::Solution({0})));
set.push_back(std::pair<Shin::Problem, Shin::Solution>(Shin::Problem({0,1}),Shin::Solution({1})));
if(test)
{
std::cerr << "Testing with entropy\n";
b.allowNoise();
}else
{
std::cerr << "Testing without entropy\n";
}
for(int j=0;;j++)
{
double err=b.teachSet(set);
if(err <0.3)
{
// b.setLearningCoeficient(5);
}
if(err <0.1)
{
// b.setLearningCoeficient(0.2);
}
if(err <0.001)
{
std::cerr << j << "(" << err <<"):\n";
for(int i=0;i<4;i++)
{
std::cerr << "\t" << i%4 <<". FOR: [" << set[i%4].first[0] << "," <<set[i%4].first[1] << "] res: " <<
q.solve(set[i%4].first)[0] << " should be " << set[i%4].second[0]<<"\n";
}
}
if(err <0.001)
break;
}
}
}

View File

@@ -1,344 +0,0 @@
#include <allegro.h>
#include <cstdlib>
#include <time.h>
#include "../src/NeuronNetwork/Learning/QLearning.h"
#include <sys/time.h>
int learningGames=6000;
int ball_x = 320;
int ball_y = 240;
int ball_tempX = 320;
int ball_tempY = 240;
int p1_x = 20;
int p1_y = 210;
int p1_tempX = 20;
int p1_tempY = 210;
int p2_x = 620;
int p2_y = 210;
int p2_tempX = 620;
int p2_tempY = 210;
int i=0;
long game=0;
int q=0;
int speed=1;
bool randomLearner=0;
int dir; //This will keep track of the circles direction
//1= up and left, 2 = down and left, 3= up and right, 4 = down and right
BITMAP *buffer; //This will be our temporary bitmap for double buffering
class X: public Shin::NeuronNetwork::Problem
{
public:
X(int p1,int ballX,int ballY,int p2)//, int ballY)
{
data.push_back((float)p1/480.0);
data.push_back((float)ballX/640.0);
data.push_back((float)ballY/480.0);
}
};
Shin::NeuronNetwork::Learning::QLearning l(3,15,3);
std::vector <std::pair<Shin::NeuronNetwork::Problem,int>> p1x;
void propagateOKtoP1(double quality=10)
{
l.learnDelayed(p1x,quality);
p1x.clear();
}
void moveBall(){
ball_tempX = ball_x;
ball_tempY = ball_y;
if (dir == 1 && ball_x > 5 && ball_y > 5){
if( ball_x == p1_x + 15 && ball_y >= p1_y && ball_y <= p1_y + 60){
dir = rand()% 2 + 3;
propagateOKtoP1(100);
}else{
--ball_x;
--ball_y;
}
} else if (dir == 2 && ball_x > 5 && ball_y < 475){
if( ball_x == p1_x + 15 && ball_y >= p1_y && ball_y <= p1_y + 60){
dir = rand()% 2 + 3;
propagateOKtoP1(100);
}else{
--ball_x;
++ball_y;
}
} else if (dir == 3 && ball_x < 635 && ball_y > 5){
if( ball_x + 5 == p2_x && ball_y >= p2_y && ball_y <= p2_y + 60){
dir = rand()% 2 + 1;
}else{
++ball_x;
--ball_y;
}
} else if (dir == 4 && ball_x < 635 && ball_y < 475){
if( ball_x + 5 == p2_x && ball_y >= p2_y && ball_y <= p2_y + 60){
dir = rand()% 2 + 1;
}else{
++ball_x;
++ball_y;
}
} else {
if (dir == 1 || dir == 3) ++dir;
else if (dir == 2 || dir == 4) --dir;
}
}
char p1Move(){
X p=X(p1_y,ball_x,ball_y,p2_y);
if(game <learningGames)
{
if(randomLearner)
{
register int tmp=game%3;
if(rand()%5==0)
{
tmp=(tmp+rand())%3;
}
if(tmp==1)
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,2));//,ball_tempX,ball_tempY));
return 1;
}else if(tmp==0)
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,0));//,ball_tempX,ball_tempY));
return -1;
}else
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,1));//,ball_tempX,ball_tempY));
return 0;
}
}else
{
if( p1_tempY > ball_y && p1_y > 0){
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,0));//,ball_tempX,ball_tempY));
return -1;
} else if( p1_tempY < ball_y && p1_y < 420){
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,2));//,ball_tempX,ball_tempY));
return 1;
}else
{
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,1));//,ball_tempX,ball_tempY));
return 0;
}
}
}
int j=l.getChoice(p);
p1x.push_back(std::pair<Shin::NeuronNetwork::Problem,int>(p,j));//,ball_tempX,ball_tempY));
return j-1;
}
char p2Move(){
if(game >= learningGames)
{
if(key[KEY_UP])
return 1;
else if( key[KEY_DOWN])
return -1;
else
return 0;
}else
{
if(rand()%10==0)
{
return (rand()%3)-1;
}
if( p2_tempY > ball_y){
return -1;
} else if( p2_tempY < ball_y){
return 1;
}
return 0;
}
}
void startNew(){
clear_keybuf();
if(game==learningGames)
textout_ex( screen, font, "Player 1 learned! Push a button to start a game.", 160, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
if(game >= learningGames)
readkey();
clear_to_color( buffer, makecol( 0, 0, 0));
ball_x = 350;
ball_y = rand()%481;
p1_x = 20;
p1_y = 210;
p2_x = 620;
p2_y = 210;
}
void checkWin(){
int won=0;
if ( ball_x < p1_x){
won=1;
game++;
textout_ex( screen, font, "Player 2 Wins!", 320, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
propagateOKtoP1(-100);
startNew();
} else if ( ball_x > p2_x){
game++;
won=1;
textout_ex( screen, font, "Player 1 Wins!", 320, 240, makecol( 255, 0, 0), makecol( 0, 0, 0));
propagateOKtoP1(100);
startNew();
}
}
void setupGame(){
acquire_screen();
rectfill( buffer, p1_x, p1_y, p1_x + 10, p1_y + 60, makecol ( 0, 0, 255));
rectfill( buffer, p2_x, p2_y, p2_x + 10, p2_y + 60, makecol ( 0, 0, 255));
circlefill ( buffer, ball_x, ball_y, 5, makecol( 128, 255, 0));
draw_sprite( screen, buffer, 0, 0);
release_screen();
srand( time(NULL));
dir = rand() % 4 + 1;
}
int main(int argc, char**argv)
{
allegro_init();
install_keyboard();
set_color_depth(16);
set_gfx_mode( GFX_AUTODETECT_WINDOWED, 640, 480, 0, 0);
l.setLearningCoeficient(0.01,0.01);
if(argc>=4 && argv[3][0]=='o')
{
std::cerr << "USING Optical Backpropagation\n";
l.opticalBackPropagation();
}
if(argc>=3)
{
std::cerr << "Setting learning coefficients to:" << atof(argv[1]) << "," << atof(argv[2]) << "\n";
l.setLearningCoeficient(atof(argv[1]),atof(argv[2]));
}
if(argc >=5)
{
std::cerr << "Setting learning games to:" << atof(argv[4]) << "\n";
learningGames=atof(argv[4]);
}
if(argc >=6 && argv[5][0]=='r')
{
std::cerr << "Setting random learning\n";
randomLearner=1;
}
buffer = create_bitmap( 640, 480);
setupGame();
speed=51;
int sleepTime=1000;
while(!key[KEY_ESC])
{
q++;
if(key[KEY_T])
{
std::cout << "ADDING next 500 learning games\n";
usleep(500000);
learningGames+=500;
}
if(game < learningGames)
{
if( key[KEY_UP] && speed < 200){
speed+=5;
}else if( key[KEY_DOWN] && speed >1 ){
speed-=5;
}
if(speed <= 0)
{
speed=1;
}
}else
{
speed=1;
}
register char p1dir=p1Move();
register char p2dir=p2Move();
p1_tempY = p1_y;
p2_tempY = p2_y;
if(p1dir < 0 && p1_y > 0){
--p1_y;
} else if( p1dir > 0 && p1_y < 420){
++p1_y;
}
if(p2dir > 0 && p2_y > 0){
--p2_y;
} else if( p2dir < 0 && p2_y < 420){
++p2_y;
}
moveBall();
if(key[KEY_PLUS_PAD] && sleepTime >=10)
sleepTime-=50;
else if(key[KEY_MINUS_PAD] && sleepTime <=15000)
sleepTime+=50;
if(i%speed==0)
{
acquire_screen();
rectfill( buffer, p1_tempX, p1_tempY, p1_tempX + 10, p1_tempY + 60, makecol ( 0, 0, 0));
rectfill( buffer, p1_x, p1_y, p1_x + 10, p1_y + 60, makecol ( 0, 0, 255));
rectfill( buffer, p2_tempX, p2_tempY, p2_tempX + 10, p2_tempY + 60, makecol ( 0, 0, 0));
rectfill( buffer, p2_x, p2_y, p2_x + 10, p2_y + 60, makecol ( 0, 0, 255));
circlefill ( buffer, ball_tempX, ball_tempY, 5, makecol( 0, 0, 0));
circlefill ( buffer, ball_x, ball_y, 5, makecol( 128, 255, 0));
draw_sprite( screen, buffer, 0, 0);
release_screen();
usleep(sleepTime);
}
checkWin();
i++;
}
return 0;
}
END_OF_MAIN()

View File

@@ -1,95 +0,0 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement.h"
#include "../src/NeuronNetwork/Solution.h"
#include <iostream>
#include <vector>
class X: public Shin::NeuronNetwork::Problem
{
public:
X(const X& a) :q(a.q) {}
X(const std::vector<float> &a):q(a) {}
std::vector<float> representation() const
{
return q;
}
protected:
std::vector<float> q;
};
int main()
{
srand(time(NULL));
std::vector<X> p;
p.push_back(X(std::vector<float>({0,0})));
p.push_back(X(std::vector<float>({1,1})));
Shin::NeuronNetwork::FeedForward q({2,6,2});
Shin::NeuronNetwork::Learning::Reinforcement b(q);
b.getPropagator().setLearningCoeficient(1);
int i=0;
b.setQualityFunction(
[&i](const Shin::NeuronNetwork::Problem &,const Shin::NeuronNetwork::Solution &s)->float
{
if(i%2==0)
{
//ocekavame 1
int e=(s[0]-0.80)*15.0;//+(abs(s[1])-0.5)*100.0;
return e;
}else
{
//ocekavame 0
int e=(0.20-s[0])*15.0;//+(0.4-abs(s[1]))*100.0;
return e;
}
return 1.0;
});
for(i=0;i < 500000000;i++)
{
if(i==75000)
{
std::cerr << "SSSSSS1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n";
b.setCoef(1);
}
if(i==150000)
{
std::cerr << "SSSSSS1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n";
b.setCoef(0.51);
}
if(i==300000)
{
std::cerr << "SSSSSS2XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n";
b.setCoef(0.15);
}
b.learn(p[i%2]);
if(i%100000==0)
srand(time(NULL));
if(i%10000==0)
for(int j=0;j<2;j++)
{
std::cerr << j%4 <<". FOR: [" << p[j%4].representation()[0] << "," <<p[j%4].representation()[0] << "] res: " << q.solve(p[j%4])[0] << "\n";
}
}
/* int i=0;
std::cerr << i%4 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
for(int i=0;i<2000;i++)sa
{
b.teach(p[i%2],s[i%2]);
std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
}
b.debugOn();
for(int i=0;i<2;i++)
{
b.teach(p[i%2],s[i%2]);
std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[0] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
s[i%4][0]<<"\n";
}
b.debugOff();*/
}

View File

@@ -1,82 +0,0 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement.h"
#include "../src/NeuronNetwork/Solution.h"
#include <iostream>
#include <vector>
class X: public Shin::NeuronNetwork::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<float> &a):Problem() {data=a;}
};
int main()
{
srand(time(NULL));
std::vector<Shin::NeuronNetwork::Problem*> p;
p.push_back(new X(std::vector<float>({0,0})));
p.push_back(new X(std::vector<float>({1,1})));
p.push_back(new X(std::vector<float>({1,0})));
p.push_back(new X(std::vector<float>({0,1})));
Shin::NeuronNetwork::FeedForward q({2,1});
Shin::NeuronNetwork::Learning::Reinforcement b(q);
int i=0;
double targetQuality=0.5;
b.setQualityFunction(
[](const Shin::NeuronNetwork::Problem &pr,const Shin::NeuronNetwork::Solution &s)->float
{
if(pr[0]==1 && pr[1]==1)
{
//ocekavame 1
int e=(s[0]-0.80)*15.0;//+(abs(s[1])-0.5)*100.0;
return e;
}else
{
//ocekavame 0
int e=(0.20-s[0])*15.0;//+(0.4-abs(s[1]))*100.0;
return e;
}
return 1.0;
});
for(i=0;i < 500000000;i++)
{
double err=b.learnSet(p);
if(i%100000==0)
srand(time(NULL));
if(err > targetQuality||i%1000==0)
{
std::cerr << i << " ("<< err <<").\n";
for(int j=0;j<4;j++)
{
std::cerr << j%4 <<". FOR: [" << p[j%4]->operator[](0) << "," <<p[j%4]->operator[](0) << "] res: " << q.solve(*p[j%4])[0] << "\n";
}
}
if(err >targetQuality)
break;
}
/* int i=0;
std::cerr << i%4 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
for(int i=0;i<2000;i++)sa
{
b.teach(p[i%2],s[i%2]);
std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n";
}
b.debugOn();
for(int i=0;i<2;i++)
{
b.teach(p[i%2],s[i%2]);
std::cerr << i%4 <<". FOR: [" << p[i%4].representation()[0] << "," <<p[i%4].representation()[0] << "] res: " << q.solve(p[i%4])[0] << " should be " <<
s[i%4][0]<<"\n";
}
b.debugOff();*/
}

View File

@@ -1,99 +0,0 @@
#include "../src/NeuronNetwork/FeedForward"
#include "../src/NeuronNetwork/Learning/Reinforcement"
#include "../src/NeuronNetwork/Learning/OpticalBackPropagation"
#include <iostream>
#include <vector>
class X: public Shin::NeuronNetwork::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<float> &a):Problem() {data=a;}
};
int main()
{
srand(time(NULL));
for (int test=0;test<3;test++)
{
Shin::NeuronNetwork::FeedForward q({2,4,1});
Shin::NeuronNetwork::Learning::Reinforcement b(q);
//b.setPropagator(new Shin::NeuronNetwork::Learning::OpticalBackPropagation(q));
b.getPropagator().setLearningCoeficient(0.4);
//b.getPropagator().allowEntropy();
double targetQuality =2.9;
if(test==2)
{
targetQuality =1.62;
std::cerr << "Testing with OBP ...\n";
b.setPropagator(new Shin::NeuronNetwork::Learning::OpticalBackPropagation(q));
b.getPropagator().setLearningCoeficient(0.5);
}
b.setQualityFunction(
[](const Shin::NeuronNetwork::Problem &p,const Shin::NeuronNetwork::Solution &s)->float
{
float expect=0.0;
if(p[0] && p[1])
expect=0;
else if(p[0] && !p[1])
expect=1;
else if(!p[0] && !p[1])
expect=0;
else if(!p[0] && p[1])
expect=1;
// std::cerr << "expected: " << expect << " got " << s[0];
if(expect==0)
{
expect=0.3-abs(s[0]);
}else
{
expect=s[0]-0.7;
}
// std::cerr << " returnning " << expect*5.0 << "\n";
return expect*19.0;
});
std::vector<Shin::NeuronNetwork::Problem*> p;
p.push_back(new X(std::vector<float>({0,0})));
p.push_back( new X(std::vector<float>({1,0})));
p.push_back( new X(std::vector<float>({0,1})));
p.push_back(new X(std::vector<float>({1,1})));
if(test==1)
{
std::cerr << "Testing with entropy ...\n";
b.getPropagator().allowNoise();
}else
{
std::cerr << "Testing without entropy ...\n";
}
for(int i=0;i < 500000000;i++)
// for(int i=0;i < 5;i++)
{
double err=b.learnSet(p);
if(i%100000==0)
srand(time(NULL));
if(i%200000==0 || err > targetQuality)
{
std::cerr << i << " ("<< err <<").\n";
for(int j=0;j<4;j++)
{
std::cerr << "\t" << i%4 <<". FOR: [" << p[j%4]->operator[](0) << "," <<p[j%4]->operator[](1) << "] res: " <<
q.solve(*p[j%4])[0] << "\n";
}
}
if(err >targetQuality)
break;
}
}
}

View File

@@ -1,99 +0,0 @@
#include "../src/NeuronNetwork/Learning/QLearning.h"
#include <iostream>
#include <vector>
class X: public Shin::NeuronNetwork::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<float> &a):Problem() {data=a;}
};
float atof(char *s)
{
int f, m, sign, d=1;
f = m = 0;
sign = (s[0] == '-') ? -1 : 1;
if (s[0] == '-' || s[0] == '+') s++;
for (; *s != '.' && *s; s++) {
f = (*s-'0') + f*10;
}
if (*s == '.')
for (++s; *s; s++) {
m = (*s-'0') + m*10;
d *= 10;
}
return sign*(f + (float)m/d);
}
float AA=10;
float getQuality(X& p, int action)
{
if((p[0]==0&& p[1]==0) ||(p[0]==1&& p[1]==1)) //should be 0
{
return action==1?-AA:AA;
}else // should be 1
{
return action==0?-AA:AA;
}
}
int main(int argc, char **argv)
{
srand(time(NULL));
Shin::NeuronNetwork::Learning::QLearning l(2,45,2);
if(argc==4 && argv[3][0]=='o')
{
std::cerr << "USING Optical Backpropagation\n";
l.opticalBackPropagation();
}
if(argc>=3)
{
std::cerr << "Setting learning coefficients to:" << atof(argv[1]) << "," << atof(argv[2]) << "\n";
l.setLearningCoeficient(atof(argv[1]),atof(argv[2]));
}
std::vector <std::pair<Shin::NeuronNetwork::Solution,Shin::NeuronNetwork::Problem>> p1x;
std::vector <X> states;
states.push_back(X(std::vector<float>({1,0})));
states.push_back(X(std::vector<float>({0,0})));
states.push_back(X(std::vector<float>({1,1})));
states.push_back(X(std::vector<float>({0,1})));
unsigned long step=0;
double quality=0;
while(step< 600000 && quality < (3.9*AA))
{
quality=0;
if(step%10000==0)
std::cerr << "STEP " << step << "\n";
for(unsigned i=0;i<states.size();i++)
{
int choice=l.getChoice(states[i]);
l.learn(states[i],choice,quality);
}
for(unsigned i=0;i<states.size();i++)
{
int choice=l.getChoice(states[i]);
quality+=getQuality(states[i],choice);
if(step%10000==0)
{
Shin::NeuronNetwork::Solution sol=l.getSolution(states[i]);
std::cerr << "\tState: [" << states[i][0] << "," << states[i][1] << "] Q-function: [" << sol[0] << "," <<sol[1] << "] Action " << choice << "\n";
}
}
step++;
}
std::cerr << step << "\n";
for(unsigned i=0;i<states.size();i++)
{
Shin::NeuronNetwork::Solution sol=l.getSolution(states[i]);
int choice=l.getChoice(states[i]);
std::cerr << "State: [" << states[i][0] << "," << states[i][1] << "] Q-function: [" << sol[0] << "," <<sol[1] << "] Action " << choice << "\n";
}
}

View File

@@ -1,49 +0,0 @@
#include "../src/NeuralNetwork/FeedForward"
#include "../src/NeuralNetwork/Learning/BackPropagation"
#include <iostream>
#include <vector>
//typedef Shin::NeuronNetwork::Problem X;
class X: public Shin::Problem
{
public:
X(const X& a) :Problem(a) {}
X(const std::vector<float> &a):Problem() {for(auto q:a){ data.push_back(q);}}
protected:
};
int main(int argc,char**)
{
srand(time(NULL));
std::vector<Shin::Solution> s;
std::vector<X> p;
p.push_back(X(std::vector<float>({0,0})));
s.push_back(Shin::Solution(std::vector<float>({0.4,0.3,0.2,0.1})));
p.push_back(X(std::vector<float>({0,0.5})));
s.push_back(Shin::Solution(std::vector<float>({0.6,0.3,0.2,0.5})));
p.push_back(X(std::vector<float>({0.4,0.5})));
s.push_back(Shin::Solution(std::vector<float>({0.4,0.4,0.2,0.8})));
Shin::NeuralNetwork::FeedForward q({2,4,4,4},1.0);
Shin::NeuralNetwork::Learning::BackPropagation bp(q);
bp.setLearningCoeficient(0.2);
for(int i=0;i<3;i++)
{
Shin::Solution sp =q.solve(p[i]);
std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
}
for(int i=0;i<4;i++)
{
for(int j=0;j<3;j++)
{
bp.teach(p[j],s[j]);
}
}
std::cerr << "XXXXXXXXXXXX\n";
for(int i=0;i<3;i++)
{
Shin::Solution sp =q.solve(p[i]);
std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n";
}
}

30
tests/recurrent.cpp Normal file
View File

@@ -0,0 +1,30 @@
#include <NeuralNetwork/Recurrent/Network.h>
#include <iostream>
// Smoke test for the recurrent network: builds a Network(2,1,1) —
// presumably 2 inputs, 1 hidden, 1 output neuron — wires a feedback loop
// between neurons 2 and 3 by hand, then feeds the same input 40 times and
// prints the output each step so the recurrent state's evolution can be
// inspected. No assertions: this is a manual/visual check, and the network
// is also streamed before and after to show weight/state dumps.
// NOTE(review): the neuron indices 0-3 assume a specific internal layout of
// getNeurons() for a (2,1,1) topology — confirm against Network's header.
int main() {
NeuralNetwork::Recurrent::Network a(2,1,1);
// Disabled: earlier experiment with near-zero weights plus a
// stringify/restore round-trip; kept for reference.
/* a.getNeurons()[3].setWeight(a.getNeurons()[2],0.00000001565598595);
a.getNeurons()[2].setWeight(a.getNeurons()[3],0.00000001565598595);
a.getNeurons()[3].setWeight(a.getNeurons()[1],0.00000001565598595);
a.getNeurons()[3].setWeight(a.getNeurons()[0],0.00000001565598595);
a.computeOutput({0.5,0});
std::cout << a;
NeuralNetwork::Recurrent::Network b(a.stringify());
*/
// Manual wiring: neuron 3 reads the two inputs weakly (0.05) and
// neuron 2 strongly (0.7); neuron 2 in turn reads neuron 3 (0.1),
// forming the recurrent loop under test.
a.getNeurons()[3].setWeight(a.getNeurons()[0],0.05);
a.getNeurons()[3].setWeight(a.getNeurons()[1],0.05);
a.getNeurons()[3].setWeight(a.getNeurons()[2],0.7);
a.getNeurons()[2].setWeight(a.getNeurons()[3],0.1);
std::cout << a;
// Constant input; output should drift step to step as state feeds back.
for(int i=0;i<40;i++) {
std::cout << a.computeOutput({1,0.7})[0] << "\n";
}
std::cout << a;
}