// File: NeuralNetworkLib/include/NeuralNetwork/BasisFunction/Linear.h
// (stray file-browser metadata — "Files / 100 lines / 3.0 KiB / C++" —
//  converted to a comment so the header remains valid C++)

#pragma once
#include <mmintrin.h>
#include <xmmintrin.h>
#include <emmintrin.h>
#include <xmmintrin.h>
#include <pmmintrin.h>
#include <immintrin.h>
#include "./StreamingBasisFunction.h"
#include "../../sse_mathfun.h"
namespace NeuralNetwork {
namespace BasisFunction {
class Linear: public StreamingBasisFunction {
public:
	Linear() {}

	/// Weighted sum (dot product) of @p weights and @p input.
	///
	/// Iterates over min(weights.size(), input.size()) elements, so a size
	/// mismatch can no longer read past the end of the shorter vector
	/// (resolves the old "check sizes" TODO). For equally-sized vectors the
	/// result is unchanged.
	///
	/// @param weights per-element weights
	/// @param input   input activations
	/// @return sum over k of weights[k] * input[k]; 0 for empty input
	inline virtual float operator()(const std::vector<float>& weights, const std::vector<float>& input) const override {
		// Clamp to the shorter vector without pulling in <algorithm>.
		const std::size_t count = input.size() < weights.size() ? input.size() : weights.size();
		const float* weightsData = weights.data();
		const float* inputData = input.data();
#ifdef USE_AVX
		const std::size_t aligned = count - count % 8;
		union {
			__m256 avx;
			float f[8];
		} acc;
		acc.avx = _mm256_setzero_ps();
		// 8-wide multiply-accumulate over the bulk of the data.
		// loadu: std::vector storage is not guaranteed 32-byte aligned.
		for (std::size_t k = 0; k < aligned; k += 8) {
			acc.avx = _mm256_add_ps(acc.avx,
				_mm256_mul_ps(_mm256_loadu_ps(weightsData + k), _mm256_loadu_ps(inputData + k)));
		}
		// Horizontal reduction: fold the high 128-bit lane onto the low one,
		// then two hadds collapse the remaining four floats into lane 0.
		acc.avx = _mm256_add_ps(acc.avx, _mm256_permute2f128_ps(acc.avx, acc.avx, 1));
		acc.avx = _mm256_hadd_ps(acc.avx, acc.avx);
		acc.avx = _mm256_hadd_ps(acc.avx, acc.avx);
		float result = acc.f[0];
		// Scalar tail — far cheaper than the old per-element _mm256_set_ps
		// round-trips through full-width vectors.
		for (std::size_t k = aligned; k < count; k++) {
			result += weightsData[k] * inputData[k];
		}
		return result;
#else
#ifdef USE_SSE
		const std::size_t aligned = count - count % 4;
		vec4f acc;
		acc.sse = _mm_setzero_ps();
		// 4-wide multiply-accumulate. _mm_loadu_ps replaces _mm_load_ps:
		// _mm_load_ps requires 16-byte alignment, which std::vector::data()
		// does not guarantee, so the old code could fault on some allocators.
		// ("register" dropped: the keyword was removed in C++17.)
		for (std::size_t k = 0; k < aligned; k += 4) {
			acc.sse = _mm_add_ps(acc.sse,
				_mm_mul_ps(_mm_loadu_ps(weightsData + k), _mm_loadu_ps(inputData + k)));
		}
		// Tail elements one at a time; _mm_load_ss has no alignment requirement.
		for (std::size_t k = aligned; k < count; k++) {
			acc.sse = _mm_add_ps(acc.sse,
				_mm_mul_ps(_mm_load_ss(weightsData + k), _mm_load_ss(inputData + k)));
		}
#ifdef USE_SSE2 // pre-SSE3 horizontal sum (no hadd available)
		acc.sse = _mm_add_ps(_mm_movehl_ps(acc.sse, acc.sse), acc.sse);
		acc.sse = _mm_add_ss(acc.sse, _mm_shuffle_ps(acc.sse, acc.sse, 1));
#else
		acc.sse = _mm_hadd_ps(acc.sse, acc.sse);
		acc.sse = _mm_hadd_ps(acc.sse, acc.sse);
#endif
		return acc.f[0];
#else
		// Portable scalar fallback. ("register" dropped: removed in C++17.)
		float result = 0;
		for (std::size_t k = 0; k < count; k++) {
			result += inputData[k] * weightsData[k];
		}
		return result;
#endif
#endif
	}

	/// Polymorphic copy; the caller owns the returned pointer.
	virtual BasisFunction* clone() const override {
		return new Linear();
	}

	/// Serialization tag — runtime string kept byte-identical.
	virtual std::string stringify() const override {
		return "{ \"class\": \"NeuralNetwork::BasisFunction::Linear\" }";
	}
};
}
}