19# if defined(__i686__) || defined(__x86_64__)
20# error Implementation only for AVX capable architectures
24# include <immintrin.h>
#if defined(FAST_FLOAT)
// Computes and returns the dot product of the two n-element float vectors
// u and v, using AVX intrinsics: 8 floats are multiplied and accumulated
// per loop iteration, then the 8 partial sums are reduced through a
// 32-byte-aligned spill buffer, and the n % 8 tail elements are handled
// scalar-wise.
// NOTE(review): the source chunk was corrupted — the signature, the
// pointer advances and the return were missing; restored here.
float DotProductAVX(const float *u, const float *v, int n) {
  const unsigned quot = n / 8;
  const unsigned rem = n % 8;
  __m256 t0 = _mm256_setzero_ps();
  for (unsigned k = 0; k < quot; k++) {
    __m256 f0 = _mm256_loadu_ps(u);
    __m256 f1 = _mm256_loadu_ps(v);
    f0 = _mm256_mul_ps(f0, f1);
    t0 = _mm256_add_ps(t0, f0);
    // Advance to the next 8-float chunk (was missing in the corrupted
    // source; without it the loop re-reads the same elements forever).
    u += 8;
    v += 8;
  }
  // Horizontal reduction of the 8 partial sums via an aligned store.
  alignas(32) float tmp[8];
  _mm256_store_ps(tmp, t0);
  float result = tmp[0] + tmp[1] + tmp[2] + tmp[3] + tmp[4] + tmp[5] + tmp[6] + tmp[7];
  // Scalar tail: the remaining n % 8 elements.
  for (unsigned k = 0; k < rem; k++) {
    result += *u++ * *v++;
  }
  return result;
}
#endif
// Computes and returns the dot product of the two n-element double vectors
// u and v, using AVX intrinsics. Each loop iteration processes 8 doubles
// via two independent 4-wide multiply/accumulate chains (t0 and t1) to
// hide FP-add latency; the chains are combined with a horizontal add,
// reduced through a 32-byte-aligned spill buffer, and the n % 8 tail
// elements are handled scalar-wise.
// NOTE(review): the source chunk was corrupted — the pointer advances,
// the loop close and the return were missing; restored here.
double DotProductAVX(const double *u, const double *v, int n) {
  const unsigned quot = n / 8;
  const unsigned rem = n % 8;
  __m256d t0 = _mm256_setzero_pd();
  __m256d t1 = _mm256_setzero_pd();
  for (unsigned k = 0; k < quot; k++) {
    // First 4-double chunk accumulates into t0.
    __m256d f0 = _mm256_loadu_pd(u);
    __m256d f1 = _mm256_loadu_pd(v);
    f0 = _mm256_mul_pd(f0, f1);
    t0 = _mm256_add_pd(t0, f0);
    u += 4;
    v += 4;
    // Second 4-double chunk accumulates into t1.
    __m256d f2 = _mm256_loadu_pd(u);
    __m256d f3 = _mm256_loadu_pd(v);
    f2 = _mm256_mul_pd(f2, f3);
    t1 = _mm256_add_pd(t1, f2);
    u += 4;
    v += 4;
  }
  // Combine the two accumulator chains, then reduce the 4 partial sums
  // via an aligned store.
  t0 = _mm256_hadd_pd(t0, t1);
  alignas(32) double tmp[4];
  _mm256_store_pd(tmp, t0);
  double result = tmp[0] + tmp[1] + tmp[2] + tmp[3];
  // Scalar tail: the remaining n % 8 elements.
  for (unsigned k = 0; k < rem; k++) {
    result += *u++ * *v++;
  }
  return result;
}
TFloat DotProductAVX(const TFloat *u, const TFloat *v, int n)