diff --git a/.gitignore b/.gitignore index 8977362..be90243 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,7 @@ NN.kdev4 .kdev4 *.o *.a +*.so *.nm +/doc/html/* +!/doc/html/doxy-boot.js diff --git a/Doxyfile b/Doxyfile new file mode 100644 index 0000000..028e42f --- /dev/null +++ b/Doxyfile @@ -0,0 +1,1210 @@ +# Doxyfile 1.4.7 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = "Artificial Neural Network Library" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = doc + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, +# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, +# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, +# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, +# Swedish, and Ukrainian. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. 
Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful is your file systems +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = YES + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like the Qt-style comments (thus requiring an +# explicit @brief command for a brief description. + +JAVADOC_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. 
+ +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for Java. +# For instance, namespaces will be presented as packages, qualified scopes +# will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to +# include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = YES + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. 
When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. 
+# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from the +# version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. 
+ +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be abled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = "./" + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx +# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py + +FILE_PATTERNS = *.cpp *.h *.dox + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or +# directories that are symbolic links (a Unix filesystem feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. 
+ +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER +# is applied to all files. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES (the default) +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES (the default) +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = YES + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. Otherwise they will link to the documentstion. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). 
You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = NO + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = ./doc/header.html + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = ./doc/footer.html + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +HTML_EXTRA_STYLESHEET = ./doc/customdoxygen.css + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. 
+ +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be +# generated containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, +# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are +# probably better off using the HTML help feature. + +GENERATE_TREEVIEW = NO + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. 
+ +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = NO + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = NO + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. 
+ +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. This is useful +# if you want to understand what is going on. On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
+# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# in the INCLUDE_PATH (see below) will be search if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. 
If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. +# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option is superseded by the HAVE_DOT option below. This is only a +# fallback. It is recommended to install and use dot, since it yields more +# powerful graphs. + +CLASS_DIAGRAMS = YES + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = YES + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# the CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = YES + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. 
+ +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will +# generate a call dependency graph for every global function or class method. +# Note that enabling this option will significantly increase the time of a run. +# So in most cases it will be better to enable call graphs for selected +# functions only using the \callgraph command. + +CALL_GRAPH = YES + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then doxygen will +# generate a caller dependency graph for every global function or class method. +# Note that enabling this option will significantly increase the time of a run. +# So in most cases it will be better to enable caller graphs for selected +# functions only using the \callergraph command. + +CALLER_GRAPH = YES + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, or gif +# If left blank png will be used. + +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that a graph may be further truncated if the graph's +# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH +# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), +# the graph is not depth-constrained. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, which results in a white background. +# Warning: Depending on the platform used, enabling this option may lead to +# badly anti-aliased labels on the edges of a graph (i.e. they become hard to +# read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. 
+ +DOT_MULTI_TARGETS = NO + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to the search engine +#--------------------------------------------------------------------------- + +# The SEARCHENGINE tag specifies whether or not a search engine should be +# used. If set to NO the values of all tags below this one will be ignored. + +SEARCHENGINE = NO diff --git a/Makefile b/Makefile index 72c946d..6e51589 100644 --- a/Makefile +++ b/Makefile @@ -7,23 +7,35 @@ all:|pre libs pre: @mkdir -p lib -libs: genetics nn +libs: ml genetics nn test: all make -C tests -nn: | nn_build lib/NeuronNetwork.a lib/NeuronNetwork.so -lib/NeuronNetwork.so: ./src/NeuronNetwork/NeuronNetwork.so - cp ./src/NeuronNetwork/NeuronNetwork.so ./lib/ +ml: | ml_build lib/MachineLearning.a lib/MachineLearning.so -lib/NeuronNetwork.a: ./src/NeuronNetwork/NeuronNetwork.a - cp ./src/NeuronNetwork/NeuronNetwork.a ./lib/ - cp ./src/NeuronNetwork/NeuronNetwork.nm ./lib/ +lib/MachineLearning.so: ./src/MachineLearning/MachineLearning.so + cp ./src/MachineLearning/MachineLearning.so ./lib/ + +lib/MachineLearning.a: ./src/MachineLearning/MachineLearning.a + cp ./src/MachineLearning/MachineLearning.a ./lib/ + cp ./src/MachineLearning/MachineLearning.nm ./lib/ + +ml_build: + @make -C src/MachineLearning + +nn: | nn_build lib/NeuralNetwork.a lib/NeuralNetwork.so + +lib/NeuralNetwork.so: ./src/NeuralNetwork/NeuralNetwork.so + cp ./src/NeuralNetwork/NeuralNetwork.so ./lib/ + +lib/NeuralNetwork.a: ./src/NeuralNetwork/NeuralNetwork.a + cp ./src/NeuralNetwork/NeuralNetwork.a ./lib/ + cp ./src/NeuralNetwork/NeuralNetwork.nm ./lib/ nn_build: - @make -C src/NeuronNetwork - + @make -C src/NeuralNetwork genetics: | genetics_build lib/Genetics.a lib/Genetics.so @@ -37,10 +49,14 @@ lib/Genetics.a: ./src/Genetics/Genetics.a genetics_build: @make -C src/Genetics +documentation: + doxygen + clean: + @make -C src/MachineLearning clean @make -C src/Genetics clean - @make -C src/NeuronNetwork clean + @make -C src/NeuralNetwork clean @make -C tests clean #@rm -f ./*.so ./*.a ./*.nm @rm -f ./lib/*.so ./lib/*.a ./lib/*.nm - @echo "Cleaned....." \ No newline at end of file + @echo "Cleaned....." diff --git a/Makefile.const b/Makefile.const index 0f849ed..bdb6785 100644 --- a/Makefile.const +++ b/Makefile.const @@ -5,7 +5,7 @@ CXXFLAGS+= -std=c++14 CXXFLAGS+= -pg -fPIC CXXFLAGS+= -g CXXFLAGS+= -fPIC -pthread - +#CXXFLAGS+= -DUSE_SSE2 OPTIMALIZATION = -O3 -march=native -mtune=native %.o : %.cpp %.h diff --git a/doc/customdoxygen.css b/doc/customdoxygen.css new file mode 100644 index 0000000..7bf2f5e --- /dev/null +++ b/doc/customdoxygen.css @@ -0,0 +1,255 @@ +h1, .h1, h2, .h2, h3, .h3{ + font-weight: 200 !important; +} + +#navrow1, #navrow2, #navrow3, #navrow4, #navrow5{ + border-bottom: 1px solid #EEEEEE; +} + +.adjust-right { +margin-left: 30px !important; +font-size: 1.15em !important; +} +.navbar{ + border: 0px solid #222 !important; +} + + +/* Sticky footer styles +-------------------------------------------------- */ +html, +body { + height: 100%; + /* The html and body elements cannot have any padding or margin. 
*/ +} + +/* Wrapper for page content to push down footer */ +#wrap { + min-height: 100%; + height: auto; + /* Negative indent footer by its height */ + margin: 0 auto -60px; + /* Pad bottom by footer height */ + padding: 0 0 60px; +} + +/* Set the fixed height of the footer here */ +#footer { + font-size: 0.9em; + padding: 8px 0px; + background-color: #f5f5f5; +} + +.footer-row { + line-height: 44px; +} + +#footer > .container { + padding-left: 15px; + padding-right: 15px; +} + +.footer-follow-icon { + margin-left: 3px; + text-decoration: none !important; +} + +.footer-follow-icon img { + width: 20px; +} + +.footer-link { + padding-top: 5px; + display: inline-block; + color: #999999; + text-decoration: none; +} + +.footer-copyright { + text-align: center; +} + + +@media (min-width: 992px) { + .footer-row { + text-align: left; + } + + .footer-icons { + text-align: right; + } +} +@media (max-width: 991px) { + .footer-row { + text-align: center; + } + + .footer-icons { + text-align: center; + } +} + +/* DOXYGEN Code Styles +----------------------------------- */ + + +a.qindex { + font-weight: bold; +} + +a.qindexHL { + font-weight: bold; + background-color: #9CAFD4; + color: #ffffff; + border: 1px double #869DCA; +} + +.contents a.qindexHL:visited { + color: #ffffff; +} + +a.code, a.code:visited, a.line, a.line:visited { + color: #4665A2; +} + +a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited { + color: #4665A2; +} + +/* @end */ + +dl.el { + margin-left: -1cm; +} + +pre.fragment { + border: 1px solid #C4CFE5; + background-color: #FBFCFD; + padding: 4px 6px; + margin: 4px 8px 4px 2px; + overflow: auto; + word-wrap: break-word; + font-size: 9pt; + line-height: 125%; + font-family: monospace, fixed; + font-size: 105%; +} + +div.fragment { + padding: 4px 6px; + margin: 4px 8px 4px 2px; + border: 1px solid #C4CFE5; +} + +div.line { + font-family: monospace, fixed; + font-size: 13px; + min-height: 13px; + line-height: 1.0; + text-wrap: unrestricted; + white-space: -moz-pre-wrap; /* Moz */ + white-space: -pre-wrap; /* Opera 4-6 */ + white-space: -o-pre-wrap; /* Opera 7 */ + white-space: pre-wrap; /* CSS3 */ + word-wrap: break-word; /* IE 5.5+ */ + text-indent: -53px; + padding-left: 53px; + padding-bottom: 0px; + margin: 0px; + -webkit-transition-property: background-color, box-shadow; + -webkit-transition-duration: 0.5s; + -moz-transition-property: background-color, box-shadow; + -moz-transition-duration: 0.5s; + -ms-transition-property: background-color, box-shadow; + -ms-transition-duration: 0.5s; + -o-transition-property: background-color, box-shadow; + -o-transition-duration: 0.5s; + transition-property: background-color, box-shadow; + transition-duration: 0.5s; +} + +div.line.glow { + background-color: cyan; + box-shadow: 0 0 10px cyan; +} + + +span.lineno { + padding-right: 4px; + text-align: right; + border-right: 2px solid #0F0; + background-color: #E8E8E8; + white-space: pre; +} +span.lineno a { + background-color: #D8D8D8; +} + +span.lineno a:hover { + background-color: #C8C8C8; +} + +div.groupHeader { + margin-left: 16px; + margin-top: 12px; + font-weight: bold; +} + +div.groupText { + margin-left: 16px; + font-style: italic; +} + +/* @group Code Colorization */ + +span.keyword { + color: #008000 +} + +span.keywordtype { + color: #604020 +} + +span.keywordflow { + color: #e08000 +} + +span.comment { + color: #800000 +} + +span.preprocessor { + color: #806020 +} + +span.stringliteral { + color: #002080 +} + +span.charliteral { + color: #008080 +} + +span.vhdldigit { + color: 
#ff00ff +} + +span.vhdlchar { + color: #000000 +} + +span.vhdlkeyword { + color: #700070 +} + +span.vhdllogic { + color: #ff0000 +} + +blockquote { + background-color: #F7F8FB; + border-left: 2px solid #9CAFD4; + margin: 0 24px 0 4px; + padding: 0 12px 0 16px; +} + diff --git a/doc/footer.html b/doc/footer.html new file mode 100644 index 0000000..f2fa204 --- /dev/null +++ b/doc/footer.html @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + diff --git a/doc/header.html b/doc/header.html new file mode 100644 index 0000000..d71678c --- /dev/null +++ b/doc/header.html @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + $projectname: $title + $title + + + $treeview + $search + $mathjax + + $extrastylesheet + + + + + + + +
[The remaining lines of doc/header.html — the Bootstrap navbar and the open page/content wrappers — were lost in text extraction, as was the entire body of doc/footer.html above; a hedged sketch of the missing header skeleton follows.]
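Only Doxygen's stock substitution variables survived extraction above ($projectname, $title, $treeview, $search, $mathjax, $extrastylesheet). As a rough sketch — an assumption-based reconstruction, not the PR's actual file — a Bootstrap-flavoured Doxygen header built around those variables would look something like the following, with the navbar contents and class names purely illustrative; note that it deliberately leaves <body> and the content container open for footer.html to close:

<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<title>$projectname: $title</title>
$treeview
$search
$mathjax
<link href="doxygen.css" rel="stylesheet" type="text/css"/>
$extrastylesheet
</head>
<body>
<!-- illustrative Bootstrap chrome; #wrap and #footer match the sticky-footer rules in customdoxygen.css -->
<nav class="navbar navbar-default" role="navigation">
  <div class="container"><a class="navbar-brand" href="index.html">$projectname</a></div>
</nav>
<div id="wrap">
  <div class="container" id="content">
  <!-- Doxygen appends the generated page body here; doc/footer.html is expected
       to close #content and #wrap, add #footer, and close body/html -->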
+ diff --git a/doc/html/doxy-boot.js b/doc/html/doxy-boot.js new file mode 100644 index 0000000..5ee5fa3 --- /dev/null +++ b/doc/html/doxy-boot.js @@ -0,0 +1,120 @@ +$( document ).ready(function() { + + $("div.headertitle").addClass("page-header"); + $("div.title").addClass("h1"); + + $('li > a[href="index.html"] > span').before(" "); + $('li > a[href="modules.html"] > span').before(" "); + $('li > a[href="namespaces.html"] > span').before(" "); + $('li > a[href="annotated.html"] > span').before(" "); + $('li > a[href="classes.html"] > span').before(" "); + $('li > a[href="inherits.html"] > span').before(" "); + $('li > a[href="functions.html"] > span').before(" "); + $('li > a[href="functions_func.html"] > span').before(" "); + $('li > a[href="functions_vars.html"] > span').before(" "); + $('li > a[href="functions_enum.html"] > span').before(" "); + $('li > a[href="functions_eval.html"] > span').before(" "); + $('img[src="ftv2ns.png"]').replaceWith('N '); + $('img[src="ftv2cl.png"]').replaceWith('C '); + + $("ul.tablist").addClass("nav nav-pills nav-justified"); + $("ul.tablist").css("margin-top", "0.5em"); + $("ul.tablist").css("margin-bottom", "0.5em"); + $("li.current").addClass("active"); + $("iframe").attr("scrolling", "yes"); + + $("#nav-path > ul").addClass("breadcrumb"); + + $("table.params").addClass("table"); + $("div.ingroups").wrapInner(""); + $("div.levels").css("margin", "0.5em"); + $("div.levels > span").addClass("btn btn-default btn-xs"); + $("div.levels > span").css("margin-right", "0.25em"); + + $("table.directory").addClass("table table-striped"); + $("div.summary > a").addClass("btn btn-default btn-xs"); + $("table.fieldtable").addClass("table"); + $(".fragment").addClass("well"); + $(".memitem").addClass("panel panel-default"); + $(".memproto").addClass("panel-heading"); + $(".memdoc").addClass("panel-body"); + $("span.mlabel").addClass("label label-info"); + + $("table.memberdecls").addClass("table"); + $("[class^=memitem]").addClass("active"); + + $("div.ah").addClass("btn btn-default"); + $("span.mlabels").addClass("pull-right"); + $("table.mlabels").css("width", "100%") + $("td.mlabels-right").addClass("pull-right"); + + $("div.ttc").addClass("panel panel-primary"); + $("div.ttname").addClass("panel-heading"); + $("div.ttname a").css("color", 'white'); + $("div.ttdef,div.ttdoc,div.ttdeci").addClass("panel-body"); + + $('#MSearchBox').parent().remove(); + + $('div.fragment.well div.line:first').css('margin-top', '15px'); + $('div.fragment.well div.line:last').css('margin-bottom', '15px'); + + $('table.doxtable').removeClass('doxtable').addClass('table table-striped table-bordered').each(function(){ + $(this).prepend(''); + $(this).find('tbody > tr:first').prependTo($(this).find('thead')); + + $(this).find('td > span.success').parent().addClass('success'); + $(this).find('td > span.warning').parent().addClass('warning'); + $(this).find('td > span.danger').parent().addClass('danger'); + }); + + + + if($('div.fragment.well div.ttc').length > 0) + { + $('div.fragment.well div.line:first').parent().removeClass('fragment well'); + } + + $('table.memberdecls').find('.memItemRight').each(function(){ + $(this).contents().appendTo($(this).siblings('.memItemLeft')); + $(this).siblings('.memItemLeft').attr('align', 'left'); + }); + + function getOriginalWidthOfImg(img_element) { + var t = new Image(); + t.src = (img_element.getAttribute ? 
img_element.getAttribute("src") : false) || img_element.src; + return t.width; + } + + $('div.dyncontent').find('img').each(function(){ + if(getOriginalWidthOfImg($(this)[0]) > $('#content>div.container').width()) + $(this).css('width', '100%'); + }); + + $(".memitem").removeClass('memitem'); + $(".memproto").removeClass('memproto'); + $(".memdoc").removeClass('memdoc'); + $("span.mlabel").removeClass('mlabel'); + $("table.memberdecls").removeClass('memberdecls'); + $("[class^=memitem]").removeClass('memitem'); + $("span.mlabels").removeClass('mlabels'); + $("table.mlabels").removeClass('mlabels'); + $("td.mlabels-right").removeClass('mlabels-right'); + $(".navpath").removeClass('navpath'); + $("li.navelem").removeClass('navelem'); + $("a.el").removeClass('el'); + $("div.ah").removeClass('ah'); + $("div.header").removeClass("header"); + + $('.mdescLeft').each(function(){ + if($(this).html()==" ") { + $(this).siblings('.mdescRight').attr('colspan', 2); + $(this).remove(); + } + }); + $('td.memItemLeft').each(function(){ + if($(this).siblings('.memItemRight').html()=="") { + $(this).attr('colspan', 2); + $(this).siblings('.memItemRight').remove(); + } + }); +}); \ No newline at end of file diff --git a/mainpage.dox b/mainpage.dox new file mode 100644 index 0000000..6484379 --- /dev/null +++ b/mainpage.dox @@ -0,0 +1,9 @@ +/** +@mainpage Artificial Neural Network Library project documentation + +@brief C++ library for Artificial Neural Networks + +@author Tomas Cernik (Tom.Cernik@gmail.com) + +TODO +*/ diff --git a/src/NeuronNetwork/IO b/src/IO similarity index 100% rename from src/NeuronNetwork/IO rename to src/IO diff --git a/src/NeuronNetwork/IO.cpp b/src/IO.cpp similarity index 70% rename from src/NeuronNetwork/IO.cpp rename to src/IO.cpp index 046f207..c19f90e 100644 --- a/src/NeuronNetwork/IO.cpp +++ b/src/IO.cpp @@ -1,6 +1,6 @@ #include "./IO" -Shin::NeuronNetwork::IO Shin::NeuronNetwork::IO::operator+(const IO &r) +Shin::IO Shin::IO::operator+(const IO &r) { Shin::NeuronNetwork::IO tmp; for(float a:this->data) diff --git a/src/NeuronNetwork/IO.h b/src/IO.h similarity index 90% rename from src/NeuronNetwork/IO.h rename to src/IO.h index c35e385..12acb00 100644 --- a/src/NeuronNetwork/IO.h +++ b/src/IO.h @@ -6,13 +6,11 @@ namespace Shin { -namespace NeuronNetwork -{ class IO { public: IO() {}; - IO(std::vector &d) : data(d) {} + IO(const std::vector &d) : data(d) {} IO(const IO &old) : data(old.data) {} IO(const std::initializer_list &a):data(a) { } virtual ~IO() {}; @@ -27,5 +25,4 @@ class IO private: }; } -} #endif \ No newline at end of file diff --git a/src/MachineLearning/Learning.h b/src/MachineLearning/Learning.h new file mode 100644 index 0000000..9d73183 --- /dev/null +++ b/src/MachineLearning/Learning.h @@ -0,0 +1,35 @@ +#ifndef _S_ML_LEARNING_H_ +#define _S_ML_LEARNING_H_ + +#include + +namespace Shin +{ +namespace MachineLearning +{ + const float LearningCoeficient=0.4; + const float DefaultNoiseSize=500; + class Learning + { + public: + inline Learning() {}; + inline virtual ~Learning() {}; + + inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; }; + + inline virtual void allowThreading() final {allowThreads=1;} + inline virtual void disableThreading() final {allowThreads=0;} + + inline virtual void allowNoise() final {noise=1;} + inline virtual void disableNoise() final {noise=0;} + inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; } + + protected: + float 
learningCoeficient=Shin::MachineLearning::LearningCoeficient; + bool allowThreads=0; + bool noise=0; + unsigned noiseSize=Shin::MachineLearning::DefaultNoiseSize; + }; +} +} +#endif \ No newline at end of file diff --git a/src/NeuronNetwork/Makefile b/src/MachineLearning/Makefile similarity index 54% rename from src/NeuronNetwork/Makefile rename to src/MachineLearning/Makefile index 6046573..3d169c6 100644 --- a/src/NeuronNetwork/Makefile +++ b/src/MachineLearning/Makefile @@ -1,12 +1,9 @@ OBJFILES=\ - FeedForward.o\ - Learning/Supervised.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o\ - Learning/Unsupervised.o Learning/Reinforcement.o Learning/RL/QFunction.o Learning/QLearning.o\ - ./IO.o + QLearning.o -LINKFILES= ../sse_mathfun.o +LINKFILES= -LIBNAME=NeuronNetwork +LIBNAME=MachineLearning include ../../Makefile.const @@ -17,7 +14,7 @@ lib: $(LIBNAME).so $(LIBNAME).a $(LIBNAME).so: $(OBJFILES) $(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so -$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ./Solution.h ./Problem.h +$(LIBNAME).a: $(OBJFILES) ./Learning.h rm -f $(LIBNAME).a # create new library ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES) ranlib $(LIBNAME).a diff --git a/src/NeuronNetwork/Learning/RL/QFunction b/src/MachineLearning/QFunction similarity index 100% rename from src/NeuronNetwork/Learning/RL/QFunction rename to src/MachineLearning/QFunction diff --git a/src/NeuronNetwork/Learning/RL/QFunction.cpp b/src/MachineLearning/QFunction.cpp similarity index 68% rename from src/NeuronNetwork/Learning/RL/QFunction.cpp rename to src/MachineLearning/QFunction.cpp index b87c80e..2692593 100644 --- a/src/NeuronNetwork/Learning/RL/QFunction.cpp +++ b/src/MachineLearning/QFunction.cpp @@ -45,18 +45,6 @@ void Shin::NeuronNetwork::RL::QFunctionTable::learn(Shin::NeuronNetwork::Solutio solution->second.second++; } } - -void Shin::NeuronNetwork::RL::QFunctionTable::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Solution, Shin::NeuronNetwork::Problem > >& p, float quality) -{ - for(int i=p.size()-1;i>=0;i--) - { - auto &pair=p[i]; - learn(pair.first,pair.second,quality); - quality*=0.3; - } -} - - Shin::NeuronNetwork::RL::QFunctionNetwork::QFunctionNetwork() : b(nullptr),function(nullptr) { @@ -80,46 +68,6 @@ void Shin::NeuronNetwork::RL::QFunctionNetwork::initialiseNetwork(size_t input, } } -void Shin::NeuronNetwork::RL::QFunctionNetwork::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Solution, Shin::NeuronNetwork::Problem > >& p, float quality) -{ - if(quality>0) - { - b->setLearningCoeficient(learningA); -// b->setLearningCoeficient(0.05); - }else - { - b->setLearningCoeficient(learningB); -// b->setLearningCoeficient(0.008); - } - - for(int i=p.size()-1;i>=0;i--) - { - learn(p[i].first,p[i].second,quality); - quality*=0.95; - } -} - -void Shin::NeuronNetwork::RL::QFunctionNetwork::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Problem,int> >& p, float quality) // TODO there must be better way -{ - std::vector> q; - register int solSize=0; - if(p.size()>0) - solSize=function->solve(p[0].first).size(); - if (!solSize) - return; - for(size_t i=0;i(s,p[i].first)); - } - learnDelayed(q,quality); -} - - void Shin::NeuronNetwork::RL::QFunctionNetwork::learn(Shin::NeuronNetwork::Solution& s, Shin::NeuronNetwork::Problem& p, float quality) { register int loops=abs(quality)/10; diff --git a/src/NeuronNetwork/Learning/RL/QFunction.h b/src/MachineLearning/QFunction.h similarity index 79% rename from 
src/NeuronNetwork/Learning/RL/QFunction.h rename to src/MachineLearning/QFunction.h index b262f05..f2adcf0 100644 --- a/src/NeuronNetwork/Learning/RL/QFunction.h +++ b/src/MachineLearning/QFunction.h @@ -3,28 +3,29 @@ #include -#include "../../Solution.h" -#include "../../FeedForward.h" -#include "../BackPropagation.h" -#include "../OpticalBackPropagation.h" +#include "Unsupervised.h" + +#include "../Solution.h" +//#include "../FeedForward.h" +//#include "BackPropagation.h" +//#include "OpticalBackPropagation.h" + namespace Shin { -namespace NeuronNetwork -{ -namespace RL +namespace MachineLearning { class QFunction { public: QFunction(); virtual ~QFunction(); - virtual void learnDelayed(std::vector> &p, float quality)=0; - virtual void learn(Solution &s, Problem &p, float quality)=0; + //virtual void learnDelayed(std::vector> &p, float quality)=0; + //virtual void learn(Solution &s, Problem &p, float quality)=0; protected: float learningCoeficient; }; - +/* class QFunctionTable : public QFunction { public: @@ -83,15 +84,11 @@ namespace RL virtual int getChoice(Problem &p); virtual Solution getSolution(Problem &p) {return function->solve(p);} - void setLearningCoeficient(double ok, double err) {learningA=ok;learningB=err;}; - void opticalBackPropagation() {delete b; b=new Learning::OpticalBackPropagation(*function);}; private: Learning::BackPropagation *b; FeedForward * function; - float learningA=0.05; - float learningB=0.008; }; -} + */ } } diff --git a/src/NeuronNetwork/Learning/QLearning b/src/MachineLearning/QLearning similarity index 100% rename from src/NeuronNetwork/Learning/QLearning rename to src/MachineLearning/QLearning diff --git a/src/MachineLearning/QLearning.cpp b/src/MachineLearning/QLearning.cpp new file mode 100644 index 0000000..222398d --- /dev/null +++ b/src/MachineLearning/QLearning.cpp @@ -0,0 +1,32 @@ +#include "./QLearning" + +void Shin::MachineLearning::QLearning::learnDelayed(std::vector< std::pair< Shin::Problem, int > >& p, float quality) +{ + std::vector> q; + register int solSize=0; + if(p.size()>0) + solSize=getSolution(p[0].first).size(); + if (!solSize) + return; + + for(size_t i=0;i(p[i].first,s)); + } + learnDelayed(q,quality); +} + +void Shin::MachineLearning::QLearning::learnDelayed(std::vector< std::pair >& p, float quality) +{ + for(int i=p.size()-1;i>=0;i--) + { + auto &pair=p[i]; + learn(pair.first,pair.second,quality); + quality*=0.3; + } +} \ No newline at end of file diff --git a/src/MachineLearning/QLearning.h b/src/MachineLearning/QLearning.h new file mode 100644 index 0000000..c4eb477 --- /dev/null +++ b/src/MachineLearning/QLearning.h @@ -0,0 +1,106 @@ +#ifndef _QLEARNING_H_ +#define _QLEARNING_H_ + +#include +#include + +#include "Unsupervised.h" +#include "../NeuralNetwork/FeedForward.h" + +/* + * #include "BackPropagation.h" + * #include "OpticalBackPropagation.h" + * #include "../FeedForward.h" + * #include "Unsupervised.h" + * #include "QFunction.h" +*/ + +/* + * http://www2.econ.iastate.edu/tesfatsi/RLUsersGuide.ICAC2005.pdf + * http://www.autonlab.org/tutorials/rl06.pdf + * http://www.nbu.bg/cogs/events/2000/Readings/Petrov/rltutorial.pdf + * + * http://www.applied-mathematics.net/qlearning/qlearning.html + * http://nn.cs.utexas.edu/downloads/papers/stanley.gecco02_1.pdf + * + * http://stackoverflow.com/questions/740389/good-implementations-of-reinforced-learning + * + * http://stackoverflow.com/questions/10722064/training-a-neural-network-with-reinforcement-learning + * + * http://remi.coulom.free.fr/Thesis/ + * 
http://remi.coulom.free.fr/Publications/Thesis.pdf + * + * http://link.springer.com/article/10.1007/BF00992696 + * + * http://scholar.google.cz/scholar?start=10&q=reinforcement+learning+feedforward&hl=en&as_sdt=0,5&as_vis=1 + * + */ + +namespace Shin +{ +namespace MachineLearning +{ + class QLearning: Learning + { + public: + inline QLearning() {}; + virtual ~QLearning() {} ; + + QLearning(const QLearning&) =delete; + QLearning& operator=(const QLearning&) =delete; + + virtual void learnDelayed(std::vector> &p, float quality) final; + virtual void learnDelayed(std::vector> &p, float quality) final; + + virtual void learn(Problem &p,Solution &s, const float& quality)=0; + virtual void learn(Problem &p,int action, const float& quality)=0; + + inline virtual void setLearningCoeficient(const float& a) override {setLearningCoeficient(a,a);}; + inline void setLearningCoeficient(const float& ok, const float& err) {learningA=ok;learningB=err;}; + + virtual Solution getSolution(Problem &p)=0; + virtual int getChoice(Problem &p)=0; + protected: + float learningA=0.05; + float learningB=0.008; + + }; + + class QLearningNetwork : public QLearning + { + public: + QLearningNetwork(size_t input, size_t size, size_t actions): QLearning(),function({input,size,actions}),actionsSize(actions) {} + QLearningNetwork(std::initializer_list s): QLearning(),function(s),actionsSize(*s.end()) {} + + QLearningNetwork(const QLearningNetwork&)=delete; + QLearningNetwork operator=(const QLearningNetwork&)=delete; + + virtual void learn(Problem &p,Solution &s, const float& quality) override; + virtual void learn(Problem &p,int action, const float& quality) override; + + virtual Solution getSolution(Problem &p) override {return function.solve(p);} + virtual int getChoice(Problem &p) override; + protected: + Shin::NeuralNetwork::FeedForward function; + size_t actionsSize; + }; + + class QLearningTable : public QLearning + { + public: + QLearningTable():QLearning(),data() {}; + + QLearningTable(const QLearningTable&)=delete; + QLearningTable operator=(const QLearningTable&)=delete; + + virtual void learn(Problem &p,Solution &s, const float& quality) override; + virtual void learn(Problem &p,int action, const float& quality) override; + + virtual Solution getSolution(Problem &p) override; + virtual int getChoice(Problem &p) override; + protected: + std::map>> data; + }; +} +} +#endif \ No newline at end of file diff --git a/src/NeuronNetwork/Learning/Reinforcement b/src/MachineLearning/Reinforcement similarity index 100% rename from src/NeuronNetwork/Learning/Reinforcement rename to src/MachineLearning/Reinforcement diff --git a/src/NeuronNetwork/Learning/Reinforcement.cpp b/src/MachineLearning/Reinforcement.cpp similarity index 100% rename from src/NeuronNetwork/Learning/Reinforcement.cpp rename to src/MachineLearning/Reinforcement.cpp diff --git a/src/NeuronNetwork/Learning/Reinforcement.h b/src/MachineLearning/Reinforcement.h similarity index 100% rename from src/NeuronNetwork/Learning/Reinforcement.h rename to src/MachineLearning/Reinforcement.h diff --git a/src/MachineLearning/Unsupervised.h b/src/MachineLearning/Unsupervised.h new file mode 100644 index 0000000..63fdd34 --- /dev/null +++ b/src/MachineLearning/Unsupervised.h @@ -0,0 +1,18 @@ +#ifndef _UNSUPERVISEDLEARNING_H_ +#define _UNSUPERVISEDLEARNING_H_ + +#include "./Learning.h" + +namespace Shin +{ +namespace MachineLearning +{ + class Unsupervised : public Learning + { + public: + Unsupervised(): Learning() {}; + virtual ~Unsupervised() {}; + }; +} +} +#endif \ 
No newline at end of file diff --git a/src/NeuronNetwork/FeedForward b/src/NeuralNetwork/FeedForward similarity index 100% rename from src/NeuronNetwork/FeedForward rename to src/NeuralNetwork/FeedForward diff --git a/src/NeuronNetwork/FeedForward.cpp b/src/NeuralNetwork/FeedForward.cpp similarity index 69% rename from src/NeuronNetwork/FeedForward.cpp rename to src/NeuralNetwork/FeedForward.cpp index 101240f..ef566a5 100644 --- a/src/NeuronNetwork/FeedForward.cpp +++ b/src/NeuralNetwork/FeedForward.cpp @@ -1,6 +1,6 @@ #include "FeedForward" -using namespace Shin::NeuronNetwork; +using namespace Shin::NeuralNetwork; FFLayer::~FFLayer() { @@ -21,7 +21,7 @@ FFNeuron& FFLayer::operator[](const size_t& neuron) neurons=new FFNeuron*[layerSize]; for(size_t i=0;i s, double lam): ACyclicNetwork(lam),layers(s.size()) +FeedForward::FeedForward(std::initializer_list s, double lam, FeedForwardInitializer weightInit): ACyclicNetwork(lam),layers(s.size()) { + transfer = new TransferFunction::TransferFunction*[s.size()]; weights= new float**[s.size()]; potentials= new float*[s.size()]; layerSizes= new size_t[s.size()]; - sums= new float*[s.size()]; + outputs= new float*[s.size()]; inputs= new float*[s.size()]; - int i=0; - int prev_size=1; + register int i=0; + register int prev_size=1; for(int layeSize:s) // TODO rename { + transfer[i]= new TransferFunction::Sigmoid(lam); layeSize+=1; if(i==0) { @@ -52,18 +54,18 @@ FeedForward::FeedForward(std::initializer_list< int > s, double lam): ACyclicNet layerSizes[i]=layeSize; weights[i]= new float*[layeSize]; potentials[i]= new float[layeSize]; - sums[i]= new float[layeSize]; + outputs[i]= new float[layeSize]; inputs[i]= new float[layeSize]; potentials[i][0]=1.0; - sums[i][0]=1.0; + outputs[i][0]=1.0; for (int j=1;j4) + TransferFunction::StreamingTransferFunction *function=dynamic_cast(transfer[layer]); + if(prevSize >=4 && function !=nullptr) { __m128 partialSolution; __m128 w; __m128 sols; - __m128 temporaryConst1=_mm_set1_ps(1.0); - __m128 temporaryConstLambda=_mm_set1_ps(-lambda); register size_t alignedPrev=prevSize>16?(prevSize-(prevSize%16)):0; for( size_t j=begin;jweights[layer][j]; + for(register size_t k=0;kweights[layer][j]+k); + w = _mm_load_ps(memory+k); sols = _mm_load_ps(sol+k); w=_mm_mul_ps(w,sols); partialSolution=_mm_add_ps(partialSolution,w); } - /* pre-SSE3 solution - __m128 temp = _mm_add_ps(_mm_movehl_ps(foo128, foo128), foo128); - float x; - _mm_store_ss(&x, _mm_add_ss(temp, _mm_shuffle_ps(temp, 1))); - */ +#ifdef USE_SSE2 //pre-SSE3 solution + partialSolution= _mm_add_ps(_mm_movehl_ps(partialSolution, partialSolution), partialSolution); + partialSolution=_mm_add_ss(partialSolution, _mm_shuffle_ps(partialSolution,partialSolution, 1)); +#else partialSolution = _mm_hadd_ps(partialSolution, partialSolution); partialSolution = _mm_hadd_ps(partialSolution, partialSolution); +#endif _mm_store_ss(inputs[layer]+j,partialSolution); - partialSolution=_mm_mul_ps(temporaryConstLambda,partialSolution); //-lambda*sol[k] - partialSolution=exp_ps(partialSolution); //exp(sols) - partialSolution= _mm_add_ps(partialSolution,temporaryConst1); //1+exp() - partialSolution= _mm_div_ps(temporaryConst1,partialSolution);//1/....*/ + partialSolution=function->operator()(partialSolution); _mm_store_ss(newSolution+j,partialSolution); } }else { for( size_t j=begin;joperator()(tmp); } } } -Solution FeedForward::solve(const Problem& p) +Shin::Solution FeedForward::solve(const Shin::Problem& p) { - register float* sol=sums[0]; + register float* sol=outputs[0]; sol[0]=1; 
for(size_t i=0;i 1 && (layerSizes[i] > 700 ||prevSize > 700)) // 700 is a guess at the layer size above which creating a thread brings some speedup { std::vector<std::thread> th; size_t s=1; - size_t step =layerSizes[i]/threads; - for(size_t t=1;t<=threads;t++) + register size_t step =layerSizes[i]/threads; + for(size_t t=1;t<threads;t++) { - if(s>=layerSizes[i]) - break; th.push_back(std::thread([i,this,newSolution,prevSize,sol](size_t from, size_t to)->void{ solvePart(newSolution,from,to,prevSize,sol,i); - },s,t==threads?layerSizes[i]:s+step));//{} + },s,s+step)); s+=step; } - + solvePart(newSolution,s,layerSizes[i],prevSize,sol,i); for (auto& thr : th) thr.join(); }else @@ -210,7 +206,7 @@ FFLayer& FeedForward::operator[](const size_t& l) ffLayers=new FFLayer*[layers]; for(size_t i=0;i #include #include +#include #include #include @@ -23,30 +26,31 @@ namespace Shin { -namespace NeuronNetwork +namespace NeuralNetwork { class FFNeuron : public Neuron { public: + inline FFNeuron(float &pot, float *w, float &outputF, float &i,float lam,TransferFunction::TransferFunction &fun):function(fun),potential(pot),weights(w),out(outputF),inputs(i),lambda(lam) { } + FFNeuron() = delete; FFNeuron(const FFNeuron&) = delete; FFNeuron& operator=(const FFNeuron&) = delete; - FFNeuron(float &pot, float *w, float &s, float &i,float lam):potential(pot),weights(w),sum(s),inputs(i),lambda(lam) { } - inline virtual float getPotential() const override {return potential;} inline virtual void setPotential(const float& p) override { potential=p;} inline virtual float getWeight(const size_t& i ) const override { return weights[i];} inline virtual void setWeight(const size_t& i,const float &p) override { weights[i]=p; } - inline virtual float output() const override { return sum; } + inline virtual float output() const override { return out; } inline virtual float input() const override { return inputs; } - inline virtual float derivatedOutput() const override { return lambda*output()*(1.0-output()); } + inline virtual float derivatedOutput() const override { return function.derivatedOutput(inputs,out); } protected: + TransferFunction::TransferFunction &function; float &potential; float *weights; - float &sum; + float &out; float &inputs; float lambda; private: @@ -55,7 +59,7 @@ namespace NeuronNetwork class FFLayer: public Layer { public: - FFLayer(size_t s, float *p,float **w,float *su,float *in,float lam): layerSize(s),potentials(p),weights(w),sums(su),inputs(in),lambda(lam) {} + inline FFLayer(size_t s, float *p,float **w,float *out,float *in,float lam,TransferFunction::TransferFunction &fun): function(fun), layerSize(s),potentials(p),weights(w),outputs(out),inputs(in),lambda(lam) {} ~FFLayer(); FFLayer(const FFLayer &) = delete; @@ -64,22 +68,48 @@ namespace NeuronNetwork virtual FFNeuron& operator[](const size_t& layer) override; inline virtual size_t size() const override {return layerSize;}; protected: + TransferFunction::TransferFunction &function; FFNeuron **neurons=nullptr; size_t layerSize; float *potentials; float **weights; - float *sums; + float *outputs; float *inputs; float lambda; }; + /** + * @brief typedef for the FeedForward network weight-initializing function + */ + typedef std::function<float(const size_t&, const size_t&, const size_t&)> FeedForwardInitializer; + + /** + * @author Tomas Cernik (Tom.Cernik@gmail.com) + * @brief Class representing FeedForward network + * @see ACyclicNetwork + */ class FeedForward:public ACyclicNetwork { public: - FeedForward(std::initializer_list<int> s, double lam=Shin::NeuronNetwork::lambda); + /** + * @brief Constructor for FeedForward + * @param s is the initializer for layers
(its sizes) + * @param lam is the parameter for the TransferFunction + * @param weightInit is the weight initializer function + */ + FeedForward(std::initializer_list<int> s, double lam=Shin::NeuralNetwork::lambda, + FeedForwardInitializer weightInit= + [](const size_t&, const size_t &, const size_t &)->float{ return 1.0-((float)(rand()%2001))/1000.0;} + ); virtual ~FeedForward(); + /** + * @brief we don't want to allow the network to be copied + */ FeedForward(const FeedForward &f) = delete; //TODO + /** + * @brief we don't want to allow the network to be copied + */ FeedForward operator=(const FeedForward &f)=delete; virtual Solution solve(const Problem& p) override; @@ -91,10 +121,11 @@ namespace NeuronNetwork FFLayer **ffLayers=nullptr; float ***weights=nullptr; float **potentials=nullptr; - float **sums=nullptr; + float **outputs=nullptr; float **inputs=nullptr; + TransferFunction::TransferFunction **transfer=nullptr; size_t *layerSizes=nullptr; - size_t layers; + size_t layers;/**< Number of layers */ }; } diff --git a/src/NeuronNetwork/Learning/BackPropagation b/src/NeuralNetwork/Learning/BackPropagation similarity index 100% rename from src/NeuronNetwork/Learning/BackPropagation rename to src/NeuralNetwork/Learning/BackPropagation diff --git a/src/NeuronNetwork/Learning/BackPropagation.cpp b/src/NeuralNetwork/Learning/BackPropagation.cpp similarity index 82% rename from src/NeuronNetwork/Learning/BackPropagation.cpp rename to src/NeuralNetwork/Learning/BackPropagation.cpp index 8643919..20b896a 100644 --- a/src/NeuronNetwork/Learning/BackPropagation.cpp +++ b/src/NeuralNetwork/Learning/BackPropagation.cpp @@ -1,12 +1,6 @@ #include "./BackPropagation" -#include -Shin::NeuronNetwork::Learning::BackPropagation::BackPropagation(FeedForward &n): Supervised(n) -{ - -} - -Shin::NeuronNetwork::Learning::BackPropagation::~BackPropagation() +Shin::NeuralNetwork::Learning::BackPropagation::~BackPropagation() { if(deltas!=nullptr) { @@ -16,7 +10,7 @@ Shin::NeuronNetwork::Learning::BackPropagation::~BackPropagation() delete[] deltas; } -void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::NeuronNetwork::Solution& expectation) +void Shin::NeuralNetwork::Learning::BackPropagation::propagate(const Shin::Solution& expectation) { if(deltas==nullptr) @@ -93,9 +87,9 @@ void Shin::NeuronNetwork::Learning::BackPropagation::propagate(const Shin::Neuro } -float Shin::NeuronNetwork::Learning::BackPropagation::teach(const Shin::NeuronNetwork::Problem& p, const Shin::NeuronNetwork::Solution& solution) +float Shin::NeuralNetwork::Learning::BackPropagation::teach(const Shin::Problem& p, const Shin::Solution& solution) { - Shin::NeuronNetwork::Solution a=network.solve(p); + Shin::Solution a=network.solve(p); double error=calculateError(solution,a); Solution s; diff --git a/src/NeuronNetwork/Learning/BackPropagation.h b/src/NeuralNetwork/Learning/BackPropagation.h similarity index 67% rename from src/NeuronNetwork/Learning/BackPropagation.h rename to src/NeuralNetwork/Learning/BackPropagation.h index 8d00fcb..acf3cf5 100644 --- a/src/NeuronNetwork/Learning/BackPropagation.h +++ b/src/NeuralNetwork/Learning/BackPropagation.h @@ -2,11 +2,12 @@ #define _BACK_PROPAGATION_H_ #include +#include #include -#include "../Solution.h" +#include "../../Solution.h" #include "../FeedForward.h" -#include "Supervised" +#include "Learning.h" /* * http://sydney.edu.au/engineering/it/~comp4302/ann4-3s.pdf @@ -22,22 +23,24 @@ namespace Shin { -namespace NeuronNetwork +namespace NeuralNetwork { namespace Learning { - class
BackPropagation : public Supervised + class BackPropagation : public Learning { public: - BackPropagation(FeedForward &n); + BackPropagation(FeedForward &n): Learning(), network(n) {} virtual ~BackPropagation(); - BackPropagation(const Shin::NeuronNetwork::Learning::BackPropagation&) =delete; - BackPropagation operator=(const Shin::NeuronNetwork::Learning::BackPropagation&) =delete; + BackPropagation(const Shin::NeuralNetwork::Learning::BackPropagation&) =delete; + BackPropagation operator=(const Shin::NeuralNetwork::Learning::BackPropagation&) =delete; + + float teach(const Problem &p,const Solution &solution); + virtual void propagate(const Solution& expectation); - float teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution); - virtual void propagate(const Shin::NeuronNetwork::Solution& expectation); protected: + FeedForward &network; inline virtual float correction(const float& expected, const float& computed) { return expected - computed;}; float **deltas=nullptr; diff --git a/src/NeuralNetwork/Learning/Learning.cpp b/src/NeuralNetwork/Learning/Learning.cpp new file mode 100644 index 0000000..db461fc --- /dev/null +++ b/src/NeuralNetwork/Learning/Learning.cpp @@ -0,0 +1,21 @@ +#include "Learning.h" + +float Shin::NeuralNetwork::Learning::Learning::calculateError(const Shin::Solution& expectation, const Shin::Solution& solution) +{ + register float a=0; + for (size_t i=0;i> &set) +{ + double error=0; + for (register size_t i=0;i -#include #include -#include "../Solution.h" +#include "../../Solution.h" #include "../FeedForward.h" namespace Shin { -namespace NeuronNetwork +namespace NeuralNetwork { namespace Learning { const float LearningCoeficient=0.4; - class Supervised + class Learning { public: - Supervised() =delete; - Supervised(FeedForward &n) : network(n) {}; - virtual ~Supervised() {}; - - float calculateError(const Solution &expectation,const Solution &solution); - virtual float teach(const Shin::NeuronNetwork::Problem &p,const Solution &solution)=0; - virtual float teachSet(const std::vector> &set) final; + Learning() {}; + inline virtual ~Learning() {}; inline virtual void setLearningCoeficient (const float& coef) { learningCoeficient=coef; }; @@ -35,9 +28,12 @@ namespace Learning inline virtual void disableNoise() final {noise=0;} inline virtual void setNoiseSize(const unsigned& milipercents) final { noiseSize=milipercents; } + float calculateError(const Solution &expectation,const Solution &solution); + virtual float teach(const Problem &p,const Solution &solution)=0; + virtual float teachSet(const std::vector> &set) final; + protected: - FeedForward &network; - float learningCoeficient=Shin::NeuronNetwork::Learning::LearningCoeficient; + float learningCoeficient=LearningCoeficient; bool allowThreads=0; bool noise=0; unsigned noiseSize=500; diff --git a/src/NeuronNetwork/Learning/OpticalBackPropagation b/src/NeuralNetwork/Learning/OpticalBackPropagation similarity index 100% rename from src/NeuronNetwork/Learning/OpticalBackPropagation rename to src/NeuralNetwork/Learning/OpticalBackPropagation diff --git a/src/NeuronNetwork/Learning/OpticalBackPropagation.cpp b/src/NeuralNetwork/Learning/OpticalBackPropagation.cpp similarity index 75% rename from src/NeuronNetwork/Learning/OpticalBackPropagation.cpp rename to src/NeuralNetwork/Learning/OpticalBackPropagation.cpp index e325ae4..ed51225 100644 --- a/src/NeuronNetwork/Learning/OpticalBackPropagation.cpp +++ b/src/NeuralNetwork/Learning/OpticalBackPropagation.cpp @@ -1,6 +1,6 @@ #include 
"./OpticalBackPropagation" -float Shin::NeuronNetwork::Learning::OpticalBackPropagation::correction(const float& expected, const float& computed) +float Shin::NeuralNetwork::Learning::OpticalBackPropagation::correction(const float& expected, const float& computed) { register float tmp=(expected-computed); register float ret=1+exp(tmp*tmp); diff --git a/src/NeuronNetwork/Learning/OpticalBackPropagation.h b/src/NeuralNetwork/Learning/OpticalBackPropagation.h similarity index 95% rename from src/NeuronNetwork/Learning/OpticalBackPropagation.h rename to src/NeuralNetwork/Learning/OpticalBackPropagation.h index d4fd321..13f3083 100644 --- a/src/NeuronNetwork/Learning/OpticalBackPropagation.h +++ b/src/NeuralNetwork/Learning/OpticalBackPropagation.h @@ -10,7 +10,7 @@ namespace Shin { -namespace NeuronNetwork +namespace NeuralNetwork { namespace Learning { diff --git a/src/NeuralNetwork/Makefile b/src/NeuralNetwork/Makefile new file mode 100644 index 0000000..f66ebcd --- /dev/null +++ b/src/NeuralNetwork/Makefile @@ -0,0 +1,28 @@ +OBJFILES=\ + FeedForward.o\ + Learning/Learning.o Learning/BackPropagation.o Learning/OpticalBackPropagation.o ../sse_mathfun.o + +LINKFILES= + +LIBNAME=NeuralNetwork + +include ../../Makefile.const + +all: lib + +../sse_mathfun.o: ../sse_mathfun.cpp ../sse_mathfun.h + make -C ../ + +lib: $(LIBNAME).so $(LIBNAME).a + +$(LIBNAME).so: $(OBJFILES) + $(CXX) -shared $(CXXFLAGS) $(OBJFILES) $(LINKFILES) -o $(LIBNAME).so + +$(LIBNAME).a: $(OBJFILES) ./Neuron.h ./Network.h ../Solution.h ../Problem.h ./TransferFunction/TransferFunction.h ./TransferFunction/Sigmoid.h + rm -f $(LIBNAME).a # create new library + ar rcv $(LIBNAME).a $(OBJFILES) $(LINKFILES) + ranlib $(LIBNAME).a + nm --demangle $(LIBNAME).a > $(LIBNAME).nm + +clean: + @rm -f ./*.o ./*.so ./*.a ./*.nm ./*/*.o diff --git a/src/NeuronNetwork/Network b/src/NeuralNetwork/Network similarity index 100% rename from src/NeuronNetwork/Network rename to src/NeuralNetwork/Network diff --git a/src/NeuralNetwork/Network.h b/src/NeuralNetwork/Network.h new file mode 100644 index 0000000..39f3ba7 --- /dev/null +++ b/src/NeuralNetwork/Network.h @@ -0,0 +1,128 @@ +#ifndef _S_NN_NN_H_ +#define _S_NN_NN_H_ + +#include +#include +#include + +#include "../Problem.h" +#include "../Solution.h" +#include "Neuron.h" + +namespace Shin +{ +namespace NeuralNetwork +{ + /** + * @brief Default value for lambda + */ + const float lambda=0.8; + + /** + * @author Tomas Cernik (Tom.Cernik@gmail.com) + * @brief Abstract class for all Layers of neurons + */ + class Layer + { + public: + + virtual ~Layer() {}; + + /** + * @brief This is a virtual function for selecting neuron + * @param neuron is position in layer + * @returns Specific neuron + */ + + virtual Neuron& operator[](const size_t& neuron)=0; + /** + * @returns Size of layer + */ + virtual size_t size() const=0; + }; + +/** + * @author Tomas Cernik (Tom.Cernik@gmail.com) + * @brief Abstract model of simple Network + */ + class Network + { + public: + /** + * @brief Constructor for Network + * @param lam is parametr for many TransferFunctions + */ + inline Network(double lam):lambda(lam) {}; + + /** + * @brief Virtual destructor for Network + */ + virtual ~Network() {}; + + /** + * @brief This is a virtual function for all networks + * @param p is a Problem to be solved + * @returns Solution of Network for Problem + */ + virtual Solution solve(const Problem&p)=0; + + /** + * @brief Getter of layer + * @param layer is position fo layer + * @returns Retruns specified layer + */ + virtual Layer& 
operator[](const size_t &layer)=0; + + /** + * @brief Returns the parameter for TransferFunctions + * @returns lambda (the parameter for TransferFunctions) + */ + inline float getLambda() const {return lambda;} + + /** + * @param t is the number of threads; if set to 0 or 1, threading is disabled + * @brief Enables or disables threaded computing of the ANN + */ + + inline virtual void setThreads(const unsigned&t) final {threads=t;} + + protected: + + /** + * @brief Parameter for TransferFunctions + */ + float lambda; + + /** + * @brief Number of threads used by network + */ + unsigned threads=1; + }; + + /** + * @author Tomas Cernik (Tom.Cernik@gmail.com) + * @brief Abstract class for all Acyclic networks + */ + + class ACyclicNetwork : public Network + { + public: + + /** + * @brief Constructor for Acyclic network + * @param lam is the parameter for many TransferFunctions + */ + inline ACyclicNetwork(double lam):Network(lam) {}; + + /** + * @brief Returns the size of the ANN in layers + * @returns Number of layers in the network + */ + virtual size_t size() const=0; + protected: + private: + }; + +} +} +#endif \ No newline at end of file diff --git a/src/NeuronNetwork/Neuron b/src/NeuralNetwork/Neuron similarity index 100% rename from src/NeuronNetwork/Neuron rename to src/NeuralNetwork/Neuron diff --git a/src/NeuralNetwork/Neuron.h b/src/NeuralNetwork/Neuron.h new file mode 100644 index 0000000..6cb6d7c --- /dev/null +++ b/src/NeuralNetwork/Neuron.h @@ -0,0 +1,64 @@ +#ifndef _S_NN_NEURON_H_ +#define _S_NN_NEURON_H_ + +#include + +namespace Shin +{ +namespace NeuralNetwork +{ + /** + * @author Tomas Cernik (Tom.Cernik@gmail.com) + * @brief Abstract class of neuron. All Neuron classes should derive from this one + */ + class Neuron + { + public: + /** + * @brief virtual destructor for Neuron + */ + virtual ~Neuron() {}; + + /** + * @brief Returns potential of neuron + */ + virtual float getPotential() const =0; + + /** + * @brief Sets potential of neuron + * @param p is the new potential + */ + virtual void setPotential(const float &p) =0; + + /** + * @brief Returns the weight for the w-th input neuron + * @param w is the index of the input neuron + */ + virtual float getWeight(const size_t &w) const =0; + + /** + * @brief Sets weight + * @param i is the index of the input neuron + * @param p is the new weight for input neuron i + */ + virtual void setWeight(const size_t& i ,const float &p) =0; + + /** + * @brief Returns output of neuron + */ + virtual float output() const =0; + + /** + * @brief Returns input of neuron + */ + virtual float input() const=0; + + /** + * @brief Returns the value of the derivative of the activation function + */ + virtual float derivatedOutput() const=0; + protected: + }; +} +} +#endif \ No newline at end of file diff --git a/src/NeuralNetwork/Perceptron.h b/src/NeuralNetwork/Perceptron.h new file mode 100644 index 0000000..2467165 --- /dev/null +++ b/src/NeuralNetwork/Perceptron.h @@ -0,0 +1,26 @@ +#ifndef _S_NN_PERCEP_H_ +#define _S_NN_PERCEP_H_ + +#include "./FeedForward" +#include "TransferFunction/Heaviside.h" + +namespace Shin +{ +namespace NeuralNetwork +{ + class Perceptron:public FeedForward + { + public: + Perceptron(const size_t &inputSize, const size_t &outputSize):FeedForward({inputSize,outputSize}) + { + for(int i=0;ik ?
1.0f : 0.0f; }; + protected: + float lambda; + + }; +} +} +} +#endif \ No newline at end of file diff --git a/src/NeuralNetwork/TransferFunction/HyperbolicTangent.h b/src/NeuralNetwork/TransferFunction/HyperbolicTangent.h new file mode 100644 index 0000000..3ad1aa8 --- /dev/null +++ b/src/NeuralNetwork/TransferFunction/HyperbolicTangent.h @@ -0,0 +1,24 @@ +#ifndef __TRAN_HYPTAN_H_ +#define __TRAN_HYPTAN_H_ + +#include "./TransferFunction.h" + +namespace Shin +{ +namespace NeuralNetwork +{ +namespace TransferFunction +{ + class HyperbolicTangent: public TransferFunction + { + public: + HyperbolicTangent(const float& lam=1):lambda(lam) {} + inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*(1-output*output); } + inline virtual float operator()(const float &x) override { return tanh(lambda*x); }; + protected: + float lambda; + }; +} +} +} +#endif \ No newline at end of file diff --git a/src/NeuralNetwork/TransferFunction/Sigmoid.h b/src/NeuralNetwork/TransferFunction/Sigmoid.h new file mode 100644 index 0000000..0152d8b --- /dev/null +++ b/src/NeuralNetwork/TransferFunction/Sigmoid.h @@ -0,0 +1,33 @@ +#ifndef __TRAN_SIGMOID_H_ +#define __TRAN_SIGMOID_H_ + +#include "./StreamingTransferFunction.h" + +namespace Shin +{ +namespace NeuralNetwork +{ +namespace TransferFunction +{ + class Sigmoid: public StreamingTransferFunction + { + public: + Sigmoid(const float lambdaP = 0.8): lambda(lambdaP) {} + inline virtual float derivatedOutput(const float&,const float &output) override { return lambda*output*(1.0f-output); } + inline virtual float operator()(const float &x) override { return 1.0f / (1.0f +exp(-lambda*x) ); }; + inline virtual __m128 operator()(__m128 x) override { + x=_mm_mul_ps(temporaryConstLambda,x); // -lambda*x + x=exp_ps(x); // exp(-lambda*x) + x= _mm_add_ps(x,temporaryConst1); // 1+exp(-lambda*x) + x= _mm_div_ps(temporaryConst1,x); // 1/(1+exp(-lambda*x)) + return x; + } + protected: + float lambda; + __m128 temporaryConst1=_mm_set1_ps(1.0); + __m128 temporaryConstLambda=_mm_set1_ps(-lambda); + }; +} +} +} +#endif \ No newline at end of file diff --git a/src/NeuralNetwork/TransferFunction/StreamingTransferFunction.h b/src/NeuralNetwork/TransferFunction/StreamingTransferFunction.h new file mode 100644 index 0000000..b998c49 --- /dev/null +++ b/src/NeuralNetwork/TransferFunction/StreamingTransferFunction.h @@ -0,0 +1,26 @@ +#ifndef __STREAMINGTRAN_FUN_H_ +#define __STREAMINGTRAN_FUN_H_ + +#include + +#include "../../sse_mathfun.h" + +#include "./TransferFunction.h" + +namespace Shin +{ +namespace NeuralNetwork +{ +namespace TransferFunction +{ + class StreamingTransferFunction : public TransferFunction + { + public: + virtual float derivatedOutput(const float &input,const float &output)=0; + virtual float operator()(const float &x)=0; + virtual __m128 operator()(__m128)=0; // it must be overridden to be used!
+ }; +} +} +} +#endif \ No newline at end of file diff --git a/src/NeuralNetwork/TransferFunction/TransferFunction.h b/src/NeuralNetwork/TransferFunction/TransferFunction.h new file mode 100644 index 0000000..1a6d069 --- /dev/null +++ b/src/NeuralNetwork/TransferFunction/TransferFunction.h @@ -0,0 +1,22 @@ +#ifndef __TRAN_FUN_H_ +#define __TRAN_FUN_H_ + +#include + +namespace Shin +{ +namespace NeuralNetwork +{ +namespace TransferFunction +{ + class TransferFunction + { + public: + virtual ~TransferFunction() {} + virtual float derivatedOutput(const float &input,const float &output)=0; + virtual float operator()(const float &x)=0; + }; +} +} +} +#endif \ No newline at end of file diff --git a/src/NeuronNetwork/Learning/QLearning.cpp b/src/NeuronNetwork/Learning/QLearning.cpp deleted file mode 100644 index 915de6c..0000000 --- a/src/NeuronNetwork/Learning/QLearning.cpp +++ /dev/null @@ -1,38 +0,0 @@ -#include "./QLearning" - -Shin::NeuronNetwork::Learning::QLearning::QLearning(size_t input, size_t size, size_t choices):fun() -{ - fun.initialiseNetwork(input,size,choices); -} - -Shin::NeuronNetwork::Learning::QLearning::~QLearning() -{ - -} - -void Shin::NeuronNetwork::Learning::QLearning::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Solution, Shin::NeuronNetwork::Problem > >& p, float quality) -{ - fun.learnDelayed(p,quality); -} - -void Shin::NeuronNetwork::Learning::QLearning::learnDelayed(std::vector< std::pair< Shin::NeuronNetwork::Problem,int > >& p, float quality) -{ - fun.learnDelayed(p,quality); -} - -void Shin::NeuronNetwork::Learning::QLearning::learn(Shin::NeuronNetwork::Solution& s, Shin::NeuronNetwork::Problem& p, float quality) -{ - fun.learn(s,p,quality); -} - - -void Shin::NeuronNetwork::Learning::QLearning::learn(Shin::NeuronNetwork::Problem& s, int action, float quality) -{ - fun.learn(s,action,quality); -} - - -int Shin::NeuronNetwork::Learning::QLearning::getChoice(Shin::NeuronNetwork::Problem& p) -{ - return fun.getChoice(p); -} diff --git a/src/NeuronNetwork/Learning/QLearning.h b/src/NeuronNetwork/Learning/QLearning.h deleted file mode 100644 index 1a0a239..0000000 --- a/src/NeuronNetwork/Learning/QLearning.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef _QLEARNING_H_ -#define _QLEARNING_H_ - -#include -#include - -#include "BackPropagation.h" -#include "OpticalBackPropagation.h" -#include "../Problem.h" -#include "../FeedForward.h" -#include "Unsupervised.h" -#include "RL/QFunction.h" - -/* - * http://www2.econ.iastate.edu/tesfatsi/RLUsersGuide.ICAC2005.pdf - * http://www.autonlab.org/tutorials/rl06.pdf - * http://www.nbu.bg/cogs/events/2000/Readings/Petrov/rltutorial.pdf - * - * http://www.applied-mathematics.net/qlearning/qlearning.html - * http://nn.cs.utexas.edu/downloads/papers/stanley.gecco02_1.pdf - * - * http://stackoverflow.com/questions/740389/good-implementations-of-reinforced-learning - * - * http://stackoverflow.com/questions/10722064/training-a-neural-network-with-reinforcement-learning - * - * http://remi.coulom.free.fr/Thesis/ - * http://remi.coulom.free.fr/Publications/Thesis.pdf - * - * http://link.springer.com/article/10.1007/BF00992696 - * - * http://scholar.google.cz/scholar?start=10&q=reinforcement+learning+feedforward&hl=en&as_sdt=0,5&as_vis=1 - * - */ - -namespace Shin -{ -namespace NeuronNetwork -{ -namespace Learning -{ - class QLearning - { - public: - QLearning(size_t input, size_t size, size_t choices); - ~QLearning(); - - QLearning(const QLearning&) =delete; - QLearning& operator=(const QLearning&) =delete; - - void 
learnDelayed(std::vector> &p, float quality); - void learnDelayed(std::vector> &p, float quality); - void learn(Solution &s, Problem &p, float quality); - void learn(Problem &p,int action, float quality); - - void learnNetwork(double maxError=0.01); - void learningCoeficient(double t); - - void initialise(size_t input, size_t size,size_t choices); - int getChoice(Problem &p); - Solution getSolution(Problem &p) {return fun.getSolution(p);} - void setLearningCoeficient(double ok, double err) {fun.setLearningCoeficient(ok,err);}; - void opticalBackPropagation() {fun.opticalBackPropagation();}; - protected: - RL::QFunctionNetwork fun; - }; -} -} -} -#endif \ No newline at end of file diff --git a/src/NeuronNetwork/Learning/Supervised b/src/NeuronNetwork/Learning/Supervised deleted file mode 120000 index 5aa1555..0000000 --- a/src/NeuronNetwork/Learning/Supervised +++ /dev/null @@ -1 +0,0 @@ -./Supervised.h \ No newline at end of file diff --git a/src/NeuronNetwork/Learning/Supervised.cpp b/src/NeuronNetwork/Learning/Supervised.cpp deleted file mode 100644 index e4c9a8c..0000000 --- a/src/NeuronNetwork/Learning/Supervised.cpp +++ /dev/null @@ -1,20 +0,0 @@ -#include "./Supervised" -float Shin::NeuronNetwork::Learning::Supervised::calculateError(const Shin::NeuronNetwork::Solution& expectation, const Shin::NeuronNetwork::Solution& solution) -{ - register float a=0; - for (size_t i=0;i> &set) -{ - double error=0; - for (register size_t i=0;i -#include - -#include "../Solution.h" -#include "../FeedForward.h" - -namespace Shin -{ -namespace NeuronNetwork -{ -namespace Learning -{ - class Unsupervised - { - public: - Unsupervised(FeedForward &n): network(n) {}; - virtual ~Unsupervised() {}; - - Unsupervised() =delete; - protected: - FeedForward &network; - }; -} -} -} -#endif \ No newline at end of file diff --git a/src/NeuronNetwork/Network.h b/src/NeuronNetwork/Network.h deleted file mode 100644 index d581f73..0000000 --- a/src/NeuronNetwork/Network.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef _S_NN_NN_H_ -#define _S_NN_NN_H_ - -#include "Problem" -#include "Solution" -#include "Neuron" - -#include -#include -#include - -#include - -namespace Shin -{ -namespace NeuronNetwork -{ - const float lambda=0.8; - class Layer - { - public: - virtual ~Layer() {}; - virtual Neuron& operator[](const size_t& neuron)=0; - virtual size_t size() const=0; - }; - - class Network - { - public: - inline Network(double lam):lambda(lam) {}; - virtual ~Network() {}; - - virtual Solution solve(const Problem&)=0; - virtual Layer& operator[](const size_t &layer)=0; - inline float getLambda() const {return lambda;} - - inline virtual void setThreads(const unsigned&t) final {threads=t;} - protected: - float lambda; - unsigned threads=1; - }; - - class ACyclicNetwork : public Network - { - public: - inline ACyclicNetwork(double lam):Network(lam) {}; - virtual size_t size() const=0; - protected: - private: - }; - -} -} -#endif \ No newline at end of file diff --git a/src/NeuronNetwork/Neuron.h b/src/NeuronNetwork/Neuron.h deleted file mode 100644 index c25d30b..0000000 --- a/src/NeuronNetwork/Neuron.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _S_NN_NEURON_H_ -#define _S_NN_NEURON_H_ - -#include - -namespace Shin -{ -namespace NeuronNetwork -{ - class Neuron - { - public: - Neuron() {}; - virtual ~Neuron() {}; - virtual float getPotential() const =0; - virtual void setPotential(const float &p) =0; - - virtual float getWeight(const size_t&) const =0; - virtual void setWeight(const size_t& i,const float &p) =0; - - virtual float 
output() const =0; - virtual float input() const=0; - virtual float derivatedOutput() const=0; - protected: - }; -} -} -#endif \ No newline at end of file diff --git a/src/NeuronNetwork/Problem b/src/Problem similarity index 100% rename from src/NeuronNetwork/Problem rename to src/Problem diff --git a/src/NeuronNetwork/Problem.h b/src/Problem.h similarity index 78% rename from src/NeuronNetwork/Problem.h rename to src/Problem.h index 0650c62..aeb4abd 100644 --- a/src/NeuronNetwork/Problem.h +++ b/src/Problem.h @@ -6,19 +6,16 @@ #include "IO.h" namespace Shin -{ -namespace NeuronNetwork { class Problem : public IO { public: Problem(): IO() {}; - Problem(std::vector &p):IO(p) {}; + Problem(const std::vector &p):IO(p) {}; Problem(const std::initializer_list &a) : IO(a) {}; protected: private: }; } -} #endif diff --git a/src/NeuronNetwork/Solution b/src/Solution similarity index 100% rename from src/NeuronNetwork/Solution rename to src/Solution diff --git a/src/NeuronNetwork/Solution.h b/src/Solution.h similarity index 94% rename from src/NeuronNetwork/Solution.h rename to src/Solution.h index eef8266..63c16b0 100644 --- a/src/NeuronNetwork/Solution.h +++ b/src/Solution.h @@ -5,8 +5,6 @@ #include "IO.h" namespace Shin -{ -namespace NeuronNetwork { class Solution : public IO { @@ -19,7 +17,6 @@ namespace NeuronNetwork inline void push_back(const float &a) {data.push_back(a);}; }; } -} #endif diff --git a/tests/Makefile b/tests/Makefile index 3a5b029..fe09c49 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -16,7 +16,7 @@ NN_TESTS= $(NN_TESTEABLE) nn-pong ALL_TESTS=$(NN_TESTEABLE) $(GEN_TESTS) -LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuronNetwork.a +LIBS=$(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a #LIBS=-lGenetics.so -lNeuronNetwork CXXFLAGS += -I$(LIB_DIR) @@ -30,10 +30,10 @@ test: all @for i in $(ALL_TESTS); do echo -n ./$$i; echo -n " - "; ./$$i; echo ""; done g-%: g-%.cpp $(LIB_DIR)/Genetics.a - $(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -o $@ $< $ $(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuronNetwork.a -lm + $(CXX) $(CXXFLAGS) $(OPTIMALIZATION) -o $@ $< $ $(LIB_DIR)/Genetics.a $(LIB_DIR)/NeuralNetwork.a -lm -nn-%: nn-%.cpp $(LIB_DIR)/NeuronNetwork.a - $(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuronNetwork.a -lm +nn-%: nn-%.cpp $(LIB_DIR)/NeuralNetwork.a + $(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuralNetwork.a -lm nn-pong: ./nn-pong.cpp $(LIB_DIR)/NeuronNetwork.a $(CXX) $(CXXFLAGS) -o $@ $< $ $(LIB_DIR)/NeuronNetwork.a -lm -lalleg -lGL diff --git a/tests/nn-01.cpp b/tests/nn-01.cpp index 9d57908..4420c50 100644 --- a/tests/nn-01.cpp +++ b/tests/nn-01.cpp @@ -1,13 +1,12 @@ -#include "../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/Learning/BackPropagation" +#include "../src/NeuralNetwork/FeedForward" +#include "../src/NeuralNetwork/Learning/BackPropagation" #include #include //typedef Shin::NeuronNetwork::Problem X; -class X: public Shin::NeuronNetwork::Problem +class X: public Shin::Problem { public: X(const X& a) :Problem(a) {} @@ -17,18 +16,18 @@ class X: public Shin::NeuronNetwork::Problem int main(int argc,char**) { srand(time(NULL)); - std::vector s; + std::vector s; std::vector p; // - s.push_back(Shin::NeuronNetwork::Solution(std::vector({1}))); + s.push_back(Shin::Solution(std::vector({1}))); p.push_back(X(std::vector({0}))); - s.push_back(Shin::NeuronNetwork::Solution(std::vector({0}))); + s.push_back(Shin::Solution(std::vector({0}))); p.push_back(X(std::vector({1}))); - Shin::NeuronNetwork::FeedForward 
q({1,5000,5000,15000,2}); - Shin::NeuronNetwork::Learning::BackPropagation b(q); + Shin::NeuralNetwork::FeedForward q({1,5000,5000,15000,2}); + Shin::NeuralNetwork::Learning::BackPropagation b(q); if(argc > 1) { std::cerr << "THREADING\n"; diff --git a/tests/nn-02.cpp b/tests/nn-02.cpp index 2fc4497..de91a4a 100644 --- a/tests/nn-02.cpp +++ b/tests/nn-02.cpp @@ -1,10 +1,9 @@ -#include "../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/FeedForward.h" +#include "../src/NeuralNetwork/FeedForward" #include -class X: public Shin::NeuronNetwork::Problem +class X: public Shin::Problem { protected: std::vector representation() const @@ -15,8 +14,8 @@ class X: public Shin::NeuronNetwork::Problem int main() { - Shin::NeuronNetwork::FeedForward n({2,4,2}); - Shin::NeuronNetwork::FeedForward nq({2,4,2}); + Shin::NeuralNetwork::FeedForward n({2,4,2}); + Shin::NeuralNetwork::FeedForward nq({2,4,2}); if(n[1].size() != 4) { std::cout << "Actual size:" << n[0].size(); @@ -34,8 +33,8 @@ int main() std::cout << "Potential: " << n[2][0].getPotential() << "\n"; std::cout << "Potential: " << nq[2][0].getPotential() << "\n"; - Shin::NeuronNetwork::Solution s =n.solve(X()); - Shin::NeuronNetwork::Solution sq =nq.solve(X()); + Shin::Solution s =n.solve(X()); + Shin::Solution sq =nq.solve(X()); if(s.size()!=2) { diff --git a/tests/nn-03.cpp b/tests/nn-03.cpp index 1d4b6b2..ed0c384 100644 --- a/tests/nn-03.cpp +++ b/tests/nn-03.cpp @@ -1,11 +1,10 @@ -#include "../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/Learning/BackPropagation" +#include "../src/NeuralNetwork/FeedForward" +#include "../src/NeuralNetwork/Learning/BackPropagation" #include #include -class X: public Shin::NeuronNetwork::Problem +class X: public Shin::Problem { public: X(const X& a) :Problem(),q(a.q) {} @@ -20,21 +19,21 @@ class X: public Shin::NeuronNetwork::Problem int main() { - std::vector s; + std::vector s; std::vector p; // - s.push_back(Shin::NeuronNetwork::Solution(std::vector({0}))); + s.push_back(Shin::Solution(std::vector({0}))); p.push_back(X(std::vector({1,0}))); - s.push_back(Shin::NeuronNetwork::Solution(std::vector({0}))); + s.push_back(Shin::Solution(std::vector({0}))); p.push_back(X(std::vector({0,1}))); - s.push_back(Shin::NeuronNetwork::Solution(std::vector({0}))); + s.push_back(Shin::Solution(std::vector({0}))); p.push_back(X(std::vector({0,0}))); - s.push_back(Shin::NeuronNetwork::Solution(std::vector({1}))); + s.push_back(Shin::Solution(std::vector({1}))); p.push_back(X(std::vector({1,1}))); - Shin::NeuronNetwork::FeedForward q({2,4,1}); - Shin::NeuronNetwork::Learning::BackPropagation b(q); + Shin::NeuralNetwork::FeedForward q({2,4,1}); + Shin::NeuralNetwork::Learning::BackPropagation b(q); b.setLearningCoeficient(10); for(int i=0;i<4;i++) diff --git a/tests/nn-04.cpp b/tests/nn-04.cpp index 9f470e5..15c8894 100644 --- a/tests/nn-04.cpp +++ b/tests/nn-04.cpp @@ -1,7 +1,7 @@ -#include "../src/NeuronNetwork/FeedForward" +#include "../src/NeuralNetwork/FeedForward" #include -class X: public Shin::NeuronNetwork::Problem +class X: public Shin::Problem { public: X(bool x,bool y):Problem() {data.push_back(x);data.push_back(y);} }; @@ -10,7 +10,7 @@ int main() { srand(time(NULL)); int lm=5; - Shin::NeuronNetwork::FeedForward net({2,lm,1}); + Shin::NeuralNetwork::FeedForward net({2,lm,1}); bool x=1; int prev_err=0; int err=0; @@ -47,7 +47,7 @@ int main() { bool x= rand()%2; bool y=rand()%2; - Shin::NeuronNetwork::Solution s =net.solve(X(x,y)); + 
Shin::Solution s =net.solve(X(x,y)); if(s[0]!= (x xor y)) err++; } diff --git a/tests/nn-bp-sppeed.cpp b/tests/nn-bp-sppeed.cpp index cfb472d..84b2de7 100644 --- a/tests/nn-bp-sppeed.cpp +++ b/tests/nn-bp-sppeed.cpp @@ -1,38 +1,31 @@ -#include "../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/Learning/BackPropagation" +#include "../src/NeuralNetwork/FeedForward" +#include "../src/NeuralNetwork/Learning/BackPropagation" #include #include -class X: public Shin::NeuronNetwork::Problem +class X: public Shin::Problem { public: - X(const X& a) :q(a.q) {} - X(const std::vector &a):q(a) {} - std::vector representation() const - { - return q; - } - protected: - std::vector q; + X(const X& a) :Problem(a.data) {} + X(const std::vector &a):Problem(a) {} }; int main(int argc, char**) { srand(time(NULL)); - std::vector s; + std::vector s; std::vector p; // - s.push_back(Shin::NeuronNetwork::Solution(std::vector({1}))); + s.push_back(Shin::Solution(std::vector({1}))); p.push_back(X(std::vector({0}))); - s.push_back(Shin::NeuronNetwork::Solution(std::vector({0}))); + s.push_back(Shin::Solution(std::vector({0}))); p.push_back(X(std::vector({1}))); - Shin::NeuronNetwork::FeedForward q({1,5000,5000,5000,1}); - Shin::NeuronNetwork::Learning::BackPropagation b(q); + Shin::NeuralNetwork::FeedForward q({1,5000,5000,5000,1}); + Shin::NeuralNetwork::Learning::BackPropagation b(q); if(argc >1) { @@ -42,6 +35,6 @@ int main(int argc, char**) for(int i=0;i<2;i++) { b.teach(p[i%2],s[i%2]); - std::cerr << i%2 <<". FOR: [" << p[i%2].representation()[0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n"; + std::cerr << i%2 <<". FOR: [" << p[i%2][0] << "] res: " << q.solve(p[i%2])[0] << " should be " << s[i%2][0]<<"\n"; } } \ No newline at end of file diff --git a/tests/nn-bp-xor.cpp b/tests/nn-bp-xor.cpp index 8cd8108..479ca5b 100644 --- a/tests/nn-bp-xor.cpp +++ b/tests/nn-bp-xor.cpp @@ -1,10 +1,10 @@ -#include "../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/Learning/BackPropagation" +#include "../src/NeuralNetwork/FeedForward" +#include "../src/NeuralNetwork/Learning/BackPropagation" #include #include -class X: public Shin::NeuronNetwork::Problem +class X: public Shin::Problem { public: X(const X& a) :Problem(a) {} @@ -17,14 +17,14 @@ int main() for (int test=0;test<2;test++) { - Shin::NeuronNetwork::FeedForward q({2,3,1}); - Shin::NeuronNetwork::Learning::BackPropagation b(q); + Shin::NeuralNetwork::FeedForward q({2,3,1}); + Shin::NeuralNetwork::Learning::BackPropagation b(q); - std::vector > set; - set.push_back(std::pair(Shin::NeuronNetwork::Problem({0,0}),Shin::NeuronNetwork::Solution({0}))); - set.push_back(std::pair(Shin::NeuronNetwork::Problem({1,0}),Shin::NeuronNetwork::Solution({1}))); - set.push_back(std::pair(Shin::NeuronNetwork::Problem({1,1}),Shin::NeuronNetwork::Solution({0}))); - set.push_back(std::pair(Shin::NeuronNetwork::Problem({0,1}),Shin::NeuronNetwork::Solution({1}))); + std::vector > set; + set.push_back(std::pair(Shin::Problem({0,0}),Shin::Solution({0}))); + set.push_back(std::pair(Shin::Problem({1,0}),Shin::Solution({1}))); + set.push_back(std::pair(Shin::Problem({1,1}),Shin::Solution({0}))); + set.push_back(std::pair(Shin::Problem({0,1}),Shin::Solution({1}))); if(test) { std::cerr << "Testing with entropy\n"; diff --git a/tests/nn-obp-xor.cpp b/tests/nn-obp-xor.cpp index 1152a9e..dd60a97 100644 --- a/tests/nn-obp-xor.cpp +++ b/tests/nn-obp-xor.cpp @@ -1,10 +1,10 @@ -#include 
"../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/Learning/OpticalBackPropagation" +#include "../src/NeuralNetwork/FeedForward" +#include "../src/NeuralNetwork/Learning/OpticalBackPropagation" #include #include -class X: public Shin::NeuronNetwork::Problem +class X: public Shin::Problem { public: X(const X& a) :Problem(a) {} @@ -16,15 +16,15 @@ int main() srand(time(NULL)); for (int test=0;test<2;test++) { - Shin::NeuronNetwork::FeedForward q({2,40,1}); - Shin::NeuronNetwork::Learning::OpticalBackPropagation b(q); + Shin::NeuralNetwork::FeedForward q({2,40,1}); + Shin::NeuralNetwork::Learning::OpticalBackPropagation b(q); b.setLearningCoeficient(0.1); - std::vector > set; - set.push_back(std::pair(Shin::NeuronNetwork::Problem({0,0}),Shin::NeuronNetwork::Solution({0}))); - set.push_back(std::pair(Shin::NeuronNetwork::Problem({1,0}),Shin::NeuronNetwork::Solution({1}))); - set.push_back(std::pair(Shin::NeuronNetwork::Problem({1,1}),Shin::NeuronNetwork::Solution({0}))); - set.push_back(std::pair(Shin::NeuronNetwork::Problem({0,1}),Shin::NeuronNetwork::Solution({1}))); + std::vector > set; + set.push_back(std::pair(Shin::Problem({0,0}),Shin::Solution({0}))); + set.push_back(std::pair(Shin::Problem({1,0}),Shin::Solution({1}))); + set.push_back(std::pair(Shin::Problem({1,1}),Shin::Solution({0}))); + set.push_back(std::pair(Shin::Problem({0,1}),Shin::Solution({1}))); if(test) { std::cerr << "Testing with entropy\n"; diff --git a/tests/nn-test.cpp b/tests/nn-test.cpp index f708f58..214a06b 100644 --- a/tests/nn-test.cpp +++ b/tests/nn-test.cpp @@ -1,13 +1,12 @@ -#include "../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/FeedForward" -#include "../src/NeuronNetwork/Learning/BackPropagation" +#include "../src/NeuralNetwork/FeedForward" +#include "../src/NeuralNetwork/Learning/BackPropagation" #include #include //typedef Shin::NeuronNetwork::Problem X; -class X: public Shin::NeuronNetwork::Problem +class X: public Shin::Problem { public: X(const X& a) :Problem(a) {} @@ -17,21 +16,21 @@ class X: public Shin::NeuronNetwork::Problem int main(int argc,char**) { srand(time(NULL)); - std::vector s; + std::vector s; std::vector p; p.push_back(X(std::vector({0,0}))); - s.push_back(Shin::NeuronNetwork::Solution(std::vector({0.4,0.3,0.2,0.1}))); + s.push_back(Shin::Solution(std::vector({0.4,0.3,0.2,0.1}))); p.push_back(X(std::vector({0,0.5}))); - s.push_back(Shin::NeuronNetwork::Solution(std::vector({0.6,0.3,0.2,0.5}))); + s.push_back(Shin::Solution(std::vector({0.6,0.3,0.2,0.5}))); p.push_back(X(std::vector({0.4,0.5}))); - s.push_back(Shin::NeuronNetwork::Solution(std::vector({0.4,0.4,0.2,0.8}))); - Shin::NeuronNetwork::FeedForward q({2,4,4,4},1.0); - Shin::NeuronNetwork::Learning::BackPropagation bp(q); + s.push_back(Shin::Solution(std::vector({0.4,0.4,0.2,0.8}))); + Shin::NeuralNetwork::FeedForward q({2,4,4,4},1.0); + Shin::NeuralNetwork::Learning::BackPropagation bp(q); bp.setLearningCoeficient(0.2); for(int i=0;i<3;i++) { - Shin::NeuronNetwork::Solution sp =q.solve(p[i]); + Shin::Solution sp =q.solve(p[i]); std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n"; } for(int i=0;i<4;i++) @@ -44,7 +43,7 @@ int main(int argc,char**) std::cerr << "XXXXXXXXXXXX\n"; for(int i=0;i<3;i++) { - Shin::NeuronNetwork::Solution sp =q.solve(p[i]); + Shin::Solution sp =q.solve(p[i]); std::cerr << sp[0] << "," << sp[1] << "," << sp[2] << "," << sp[3] << "\n"; } } \ No newline at end of file