#ifndef _mm256_set_m128i
#define _mm256_set_m128i(hi,lo) _mm256_insertf128_si256(_mm256_castsi128_si256(lo),(hi),1)
#endif
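// Hedged usage sketch, not part of the original header (set_m128i_demo is a
// hypothetical name; assumes <immintrin.h> is already included): the fallback
// macro above places `lo` in the low 128-bit lane and `hi` in the high lane,
// matching the newer-compiler intrinsic of the same name.
static inline __m256i set_m128i_demo(void) {
  __m128i lo = _mm_set1_epi32(1);
  __m128i hi = _mm_set1_epi32(2);
  return _mm256_set_m128i(hi, lo); // int32 lanes, low to high: 1,1,1,1,2,2,2,2
}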
    // Vsplat, complex float: _mm256_set_ps lists lanes high to low, so the
    // in-memory pattern is a,b,a,b,... i.e. four (re=a, im=b) pairs.
    return _mm256_set_ps(b,a,b,a,b,a,b,a);
    // Vsplat, real float
    return _mm256_set_ps(a,a,a,a,a,a,a,a);
    // Vsplat, complex double
    return _mm256_set_pd(b,a,b,a);
    // Vsplat, real double
    return _mm256_set_pd(a,a,a,a);
    // Vsplat, integer
    return _mm256_set1_epi32(a);
    // Vstore, integer: aligned 256-bit store
    _mm256_store_si256((__m256i*)I,a);
    // Vstream: non-temporal stores that bypass the cache hierarchy
    _mm256_stream_ps(a,b);
    _mm256_stream_pd(a,b);
    // Vset: fill a vector from an array; _mm256_set_* takes elements
    // high lane first, hence the reversed argument order.
    return _mm256_set_ps(a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
    return _mm256_set_pd(a[3],a[2],a[1],a[0]);
    return _mm256_set_epi32(a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
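// Hedged note, not in the original source: because of the reversed argument
// order, each Vset above is element-for-element equivalent to an unaligned
// load, e.g. for the float case:
//   __m256 v = _mm256_loadu_ps(a); // same lane order as the set above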
template <typename Out_type, typename In_type>
struct Reduce {
  // The unspecialised form must never be instantiated; only the explicit
  // specialisations further down are valid.
  inline Out_type operator()(In_type in) {
    printf("Error, using wrong Reduce function\n");
    exit(1);
    return 0;
  }
};
    // Sum, float / double
    return _mm256_add_ps(a,b);
    return _mm256_add_pd(a,b);
#if defined (AVX1) || defined (AVXFMA) || defined (AVXFMA4)
    // AVX1 has no 256-bit integer add: split into 128-bit halves,
    // add each half, and recombine.
    __m128i a0,a1;
    __m128i b0,b1;
    a0 = _mm256_extractf128_si256(a,0);
    b0 = _mm256_extractf128_si256(b,0);
    a1 = _mm256_extractf128_si256(a,1);
    b1 = _mm256_extractf128_si256(b,1);
    a0 = _mm_add_epi32(a0,b0);
    a1 = _mm_add_epi32(a1,b1);
    return _mm256_set_m128i(a1,a0);
#endif
#if defined (AVX2)
    return _mm256_add_epi32(a,b);
#endif
    // Sub, float / double
    return _mm256_sub_ps(a,b);
    return _mm256_sub_pd(a,b);
#if defined (AVX1) || defined (AVXFMA) || defined (AVXFMA4)
    // Same half-splitting as Sum: AVX1 lacks 256-bit integer subtract.
    __m128i a0,a1;
    __m128i b0,b1;
    a0 = _mm256_extractf128_si256(a,0);
    b0 = _mm256_extractf128_si256(b,0);
    a1 = _mm256_extractf128_si256(a,1);
    b1 = _mm256_extractf128_si256(b,1);
    a0 = _mm_sub_epi32(a0,b0);
    a1 = _mm_sub_epi32(a1,b1);
    return _mm256_set_m128i(a1,a0);
#endif
#if defined (AVX2)
    return _mm256_sub_epi32(a,b);
#endif
    // MultRealPart, float: broadcast the real parts of a, then multiply.
    __m256 ymm0 = _mm256_moveldup_ps(a);       // ar,ar,...
    return _mm256_mul_ps(ymm0,b);              // ar*br, ar*bi,...
    // MultRealPart, double
    __m256d ymm0 = _mm256_shuffle_pd(a,a,0x0); // ar,ar
    return _mm256_mul_pd(ymm0,b);
    // MaddRealPart, float: Re(a)*b + c
    __m256 ymm0 = _mm256_moveldup_ps(a);
    return _mm256_add_ps(_mm256_mul_ps( ymm0, b),c);
    // MaddRealPart, double
    __m256d ymm0 = _mm256_shuffle_pd( a, a, 0x0 );
    return _mm256_add_pd(_mm256_mul_pd( ymm0, b),c);
      // MultComplex, float: (ar+i*ai)(br+i*bi) = (ar*br - ai*bi) + i(ar*bi + ai*br)
      __m256 ymm0,ymm1,ymm2;
      ymm0 = _mm256_shuffle_ps(a,a,0xA0);  // ar,ar
      ymm0 = _mm256_mul_ps(ymm0,b);        // ar*br, ar*bi
      ymm1 = _mm256_shuffle_ps(b,b,0xB1);  // bi,br
      ymm2 = _mm256_shuffle_ps(a,a,0xF5);  // ai,ai
      ymm1 = _mm256_mul_ps(ymm1,ymm2);     // ai*bi, ai*br
      return _mm256_addsub_ps(ymm0,ymm1);  // even lanes subtract, odd lanes add
#endif
#if defined (AVXFMA4)
      __m256 a_real = _mm256_shuffle_ps(a,a,0xA0); // ar,ar
      __m256 a_imag = _mm256_shuffle_ps(a,a,0xF5); // ai,ai
      __m256 tmp    = _mm256_shuffle_ps(b,b,0xB1); // bi,br
      a_imag = _mm256_mul_ps( a_imag,tmp );
      return _mm256_maddsub_ps( a_real, b, a_imag );
#endif
#if defined (AVX2) || defined (AVXFMA)
      __m256 a_real = _mm256_moveldup_ps( a );     // ar,ar
      __m256 a_imag = _mm256_movehdup_ps( a );     // ai,ai
      a_imag = _mm256_mul_ps( a_imag, _mm256_shuffle_ps(b,b,0xB1) );
      return _mm256_fmaddsub_ps( a_real, b, a_imag );
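// Hedged sketch, not in the original source (mult_complex_ref is a
// hypothetical name): scalar oracle for the interleaved complex multiply
// above, useful when unit-testing the three intrinsic paths.
static inline void mult_complex_ref(const float a[8], const float b[8], float r[8]) {
  for (int i = 0; i < 8; i += 2) {
    r[i]   = a[i]*b[i]   - a[i+1]*b[i+1]; // real: ar*br - ai*bi
    r[i+1] = a[i]*b[i+1] + a[i+1]*b[i];   // imag: ar*bi + ai*br
  }
}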
      // MultComplex, double (AVX1 path)
      __m256d ymm0,ymm1,ymm2;
      ymm0 = _mm256_shuffle_pd(a,a,0x0);  // ar,ar
      ymm0 = _mm256_mul_pd(ymm0,b);       // ar*br, ar*bi
      ymm1 = _mm256_shuffle_pd(b,b,0x5);  // bi,br
      ymm2 = _mm256_shuffle_pd(a,a,0xF);  // ai,ai
      ymm1 = _mm256_mul_pd(ymm1,ymm2);    // ai*bi, ai*br
      return _mm256_addsub_pd(ymm0,ymm1); // even lanes subtract, odd lanes add
#if defined (AVXFMA4)
      __m256d a_real = _mm256_shuffle_pd(a,a,0x0); // ar,ar
      __m256d a_imag = _mm256_shuffle_pd(a,a,0xF); // ai,ai
      a_imag = _mm256_mul_pd( a_imag, _mm256_permute_pd( b, 0x5 ) ); // ai*bi, ai*br
      return _mm256_maddsub_pd( a_real, b, a_imag );
#endif
#if defined (AVX2) || defined (AVXFMA)
      __m256d a_real = _mm256_movedup_pd( a );     // ar,ar
      __m256d a_imag = _mm256_shuffle_pd(a,a,0xF); // ai,ai
      a_imag = _mm256_mul_pd( a_imag, _mm256_permute_pd( b, 0x5 ) );
      return _mm256_fmaddsub_pd( a_real, b, a_imag );
#endif
    // Split-phase complex multiply helpers.
    inline void Prep(__m256 ari,__m256 &air) {
    inline void Mul(__m256 ari,__m256 air,__m256 b,__m256 &riir,__m256 &iirr) {
    inline void Madd(__m256 ari,__m256 air,__m256 b,__m256 &riir,__m256 &iirr) {
    inline void End(__m256 ari,__m256 &air) {
    inline void mac(__m256 &a, __m256 b, __m256 c){
#if defined (AVX1)
      a = _mm256_add_ps(_mm256_mul_ps(b,c),a); // no FMA: separate mul + add
#endif
#if defined (AVXFMA4)
      a = _mm256_macc_ps(b,c,a);
#endif
#if defined (AVX2) || defined (AVXFMA)
      a = _mm256_fmadd_ps( b, c, a);
#endif
    }
    inline void mac(__m256d &a, __m256d b, __m256d c){
#if defined (AVX1)
      a = _mm256_add_pd(_mm256_mul_pd(b,c),a);
#endif
#if defined (AVXFMA4)
      a = _mm256_macc_pd(b,c,a);
#endif
#if defined (AVX2) || defined (AVXFMA)
      a = _mm256_fmadd_pd( b, c, a);
#endif
    }
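// Hedged note, not in the original source: mac() performs a += b*c in place.
// On the FMA paths the product feeds the add unrounded, so results can
// differ in the last ulp from the separate mul+add of the AVX1 path, e.g.
//   __m256d acc = _mm256_setzero_pd();
//   mac(acc, x, y); // acc += x*y, fused where the hardware allows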
    // Mult, float / double
    return _mm256_mul_ps(a,b);
    return _mm256_mul_pd(a,b);
#if defined (AVX1) || defined (AVXFMA)
    // No 256-bit integer multiply before AVX2: work in 128-bit halves.
    __m128i a0,a1;
    __m128i b0,b1;
    a0 = _mm256_extractf128_si256(a,0);
    b0 = _mm256_extractf128_si256(b,0);
    a1 = _mm256_extractf128_si256(a,1);
    b1 = _mm256_extractf128_si256(b,1);
    a0 = _mm_mullo_epi32(a0,b0);
    a1 = _mm_mullo_epi32(a1,b1);
    return _mm256_set_m128i(a1,a0);
#else
    return _mm256_mullo_epi32(a,b);
#endif
    // Div, float / double
    return _mm256_div_ps(a, b);
    return _mm256_div_pd(a, b);
    // Conj: negate the imaginary lanes.
    return _mm256_xor_ps(_mm256_addsub_ps(_mm256_setzero_ps(),in), _mm256_set1_ps(-0.f));
    // Conj, double
    return _mm256_xor_pd(_mm256_addsub_pd(_mm256_setzero_pd(),in), _mm256_set1_pd(-0.));
    // TimesMinusI: (r,i) -> (i,-r)
    __m256 tmp = _mm256_addsub_ps(_mm256_setzero_ps(),in);          // (-r, i)
    return _mm256_shuffle_ps(tmp,tmp,_MM_SELECT_FOUR_FOUR(2,3,0,1)); // swap within pairs
    // TimesMinusI, double
    __m256d tmp = _mm256_addsub_pd(_mm256_setzero_pd(),in);
    return _mm256_shuffle_pd(tmp,tmp,0x5);
    // TimesI: (r,i) -> (-i,r)
    __m256 tmp = _mm256_shuffle_ps(in,in,_MM_SELECT_FOUR_FOUR(2,3,0,1)); // (i, r)
    return _mm256_addsub_ps(_mm256_setzero_ps(),tmp);
    // TimesI, double
    __m256d tmp = _mm256_shuffle_pd(in,in,0x5);
    return _mm256_addsub_pd(_mm256_setzero_pd(),tmp);
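// Hedged sketch, not in the original source (times_i_ref is a hypothetical
// name): scalar oracle for TimesI on the interleaved layout, i.e. z -> i*z.
static inline void times_i_ref(const float in[8], float out[8]) {
  for (int k = 0; k < 8; k += 2) {
    out[k]   = -in[k+1]; // Re(i*z) = -Im(z)
    out[k+1] =  in[k];   // Im(i*z) =  Re(z)
  }
}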
    // Permute0: swap the two 128-bit halves.
    return _mm256_permute2f128_ps(in,in,0x01);
    // Permute0, double
    return _mm256_permute2f128_pd(in,in,0x01);
    // Permute1, double: swap neighbouring elements within each half.
    return _mm256_shuffle_pd(in,in,0x5);
  static inline __m256i StoH (__m256 a,__m256 b) {
    __m256i h;
    __m128i ha = _mm256_cvtps_ph(a,0); // pack 8 floats into 8 halves
    __m128i hb = _mm256_cvtps_ph(b,0);
    h = (__m256i)_mm256_castps128_ps256((__m128)ha);
    h = (__m256i)_mm256_insertf128_ps((__m256)h,(__m128)hb,1);
    return h;
  }
  static inline void HtoS (__m256i h,__m256 &sa,__m256 &sb) {
    sa = _mm256_cvtph_ps((__m128i)_mm256_extractf128_ps((__m256)h,0));
    sb = _mm256_cvtph_ps((__m128i)_mm256_extractf128_ps((__m256)h,1));
  }
  static inline __m256 DtoS (__m256d a,__m256d b) {
    __m128 sa = _mm256_cvtpd_ps(a); // narrow 4 doubles to 4 floats
    __m128 sb = _mm256_cvtpd_ps(b);
    __m256 s  = _mm256_castps128_ps256(sa);
    s = _mm256_insertf128_ps(s,sb,1);
    return s;
  }
  static inline void StoD (__m256 s,__m256d &a,__m256d &b) {
    a = _mm256_cvtps_pd(_mm256_extractf128_ps(s,0));
    b = _mm256_cvtps_pd(_mm256_extractf128_ps(s,1));
  }
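// Hedged note, not in the original source: DtoS/StoD only lose precision in
// the double-to-single direction, while StoH/HtoS additionally require the
// F16C instruction set, since _mm256_cvtps_ph / _mm256_cvtph_ps are F16C
// instructions (compile with -mf16c on GCC/Clang).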
  static inline __m256i DtoH (__m256d a,__m256d b,__m256d c,__m256d d) {
    return StoH(DtoS(a,b),DtoS(c,d)); // double -> single -> half
  }
  static inline void HtoD (__m256i h,__m256d &a,__m256d &b,__m256d &c,__m256d &d) {
    __m256 sa,sb;
    HtoS(h,sa,sb);
    StoD(sa,a,b);
    StoD(sb,c,d);
  }
  // ExchangeN swaps blocks of 2^N adjacent elements between in1 and in2.
  static inline void Exchange0(__m256 &out1,__m256 &out2,__m256 in1,__m256 in2){
    out1 = _mm256_permute2f128_ps(in1,in2,0x20); // low halves
    out2 = _mm256_permute2f128_ps(in1,in2,0x31); // high halves
  }
  static inline void Exchange1(__m256 &out1,__m256 &out2,__m256 in1,__m256 in2){
  static inline void Exchange2(__m256 &out1,__m256 &out2,__m256 in1,__m256 in2){
  static inline void Exchange3(__m256 &out1,__m256 &out2,__m256 in1,__m256 in2){
  static inline void Exchange0(__m256d &out1,__m256d &out2,__m256d in1,__m256d in2){
    out1 = _mm256_permute2f128_pd(in1,in2,0x20);
    out2 = _mm256_permute2f128_pd(in1,in2,0x31);
  }
  static inline void Exchange1(__m256d &out1,__m256d &out2,__m256d in1,__m256d in2){
    out1 = _mm256_shuffle_pd(in1,in2,0x0); // even elements
    out2 = _mm256_shuffle_pd(in1,in2,0xF); // odd elements
  }
  static inline void Exchange2(__m256d &out1,__m256d &out2,__m256d in1,__m256d in2){
  static inline void Exchange3(__m256d &out1,__m256d &out2,__m256d in1,__m256d in2){
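// Hedged demo, not in the original source (exchange0_demo is a hypothetical
// name, and the unqualified call assumes Exchange0 is in scope): with
// in1 = {0,1,2,3} and in2 = {10,11,12,13}, Exchange0 produces
// out1 = {0,1,10,11} and out2 = {2,3,12,13}.
static inline void exchange0_demo(double out1[4], double out2[4]) {
  __m256d in1 = _mm256_set_pd(3,2,1,0);     // memory order 0,1,2,3
  __m256d in2 = _mm256_set_pd(13,12,11,10); // memory order 10,11,12,13
  __m256d o1,o2;
  Exchange0(o1,o2,in1,in2);
  _mm256_storeu_pd(out1,o1);
  _mm256_storeu_pd(out2,o2);
}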
#if defined (AVX2)
// Rotations within each 128-bit lane, built on the AVX2 byte-wise alignr.
#define _mm256_alignr_epi32_grid(ret,a,b,n) ret = (__m256)  _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*4)%16)
#define _mm256_alignr_epi64_grid(ret,a,b,n) ret = (__m256d) _mm256_alignr_epi8((__m256i)a,(__m256i)b,(n*8)%16)
#endif

#if defined (AVX1) || defined (AVXFMA)
#define _mm256_alignr_epi32_grid(ret,a,b,n) {                           \
    __m128 aa, bb;                                                      \
    aa  = _mm256_extractf128_ps(a,1);                                   \
    bb  = _mm256_extractf128_ps(b,1);                                   \
    aa  = (__m128)_mm_alignr_epi8((__m128i)aa,(__m128i)bb,(n*4)%16);    \
    ret = _mm256_insertf128_ps(ret,aa,1);                               \
    aa  = _mm256_extractf128_ps(a,0);                                   \
    bb  = _mm256_extractf128_ps(b,0);                                   \
    aa  = (__m128)_mm_alignr_epi8((__m128i)aa,(__m128i)bb,(n*4)%16);    \
    ret = _mm256_insertf128_ps(ret,aa,0);                               \
  }
#define _mm256_alignr_epi64_grid(ret,a,b,n) {                           \
    __m128d aa, bb;                                                     \
    aa  = _mm256_extractf128_pd(a,1);                                   \
    bb  = _mm256_extractf128_pd(b,1);                                   \
    aa  = (__m128d)_mm_alignr_epi8((__m128i)aa,(__m128i)bb,(n*8)%16);   \
    ret = _mm256_insertf128_pd(ret,aa,1);                               \
    aa  = _mm256_extractf128_pd(a,0);                                   \
    bb  = _mm256_extractf128_pd(b,0);                                   \
    aa  = (__m128d)_mm_alignr_epi8((__m128i)aa,(__m128i)bb,(n*8)%16);   \
    ret = _mm256_insertf128_pd(ret,aa,0);                               \
  }
#endif
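// Hedged note, not in the original source: on both paths the alignr macros
// rotate within 128-bit lanes only. Rotate::tRotate below combines them
// with Permute0 (a half swap) to realise a full cross-lane vector rotation.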
    // Runtime dispatch onto the compile-time tRotate<n> templates below.
    static inline __m256 rotate(__m256 in, int n){
    static inline __m256d rotate(__m256d in, int n){
      // tRotate<n>, float: rotate left by n using in and tmp = Permute0(in).
      if ( n > 3 ) {
        _mm256_alignr_epi32_grid(ret,in,tmp,n);
      } else {
        _mm256_alignr_epi32_grid(ret,tmp,in,n);
      }
      // tRotate<n>, double
      if ( n > 1 ) {
        _mm256_alignr_epi64_grid(ret,in,tmp,n);
      } else {
        _mm256_alignr_epi64_grid(ret,tmp,in,n);
      }
    // Reduce to ComplexF: sum the four complex lanes pairwise.
    __m256 v1,v2;
    v1 = Optimization::Permute::Permute0(in);
    v1 = _mm256_add_ps(v1,in);
    v2 = Optimization::Permute::Permute1(v1);
    v1 = _mm256_add_ps(v1,v2);
    u256f conv; conv.v = v1;
    return Grid::ComplexF(conv.f[0],conv.f[1]);
    // Reduce to RealF: full binary tree over all eight lanes.
    __m256 v1,v2;
    v1 = Optimization::Permute::Permute0(in);
    v1 = _mm256_add_ps(v1,in);
    v2 = Optimization::Permute::Permute1(v1);
    v1 = _mm256_add_ps(v1,v2);
    v2 = Optimization::Permute::Permute2(v1);
    v1 = _mm256_add_ps(v1,v2);
    u256f conv; conv.v = v1;
    return conv.f[0];
    // Reduce to ComplexD: sum the two complex lanes.
    __m256d v1;
    v1 = Optimization::Permute::Permute0(in);
    v1 = _mm256_add_pd(v1,in);
    u256d conv; conv.v = v1;
    return Grid::ComplexD(conv.f[0],conv.f[1]);
    // Reduce to RealD
    __m256d v1,v2;
    v1 = Optimization::Permute::Permute0(in);
    v1 = _mm256_add_pd(v1,in);
    v2 = Optimization::Permute::Permute1(v1);
    v1 = _mm256_add_pd(v1,v2);
    u256d conv; conv.v = v1;
    return conv.f[0];
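// Hedged sketch, not in the original source (reduce_real_ref is a
// hypothetical name): scalar oracle for the real-number reductions above;
// the permute/add tree reaches the same total in log2(N) vector steps.
static inline double reduce_real_ref(const double in[4]) {
  double s = 0.;
  for (int i = 0; i < 4; i++) s += in[i];
  return s;
}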
    // Integer Reduce
    __m128i ret;
#if defined (AVX2)
    __m256i v1,v2;
    __m128i u1,u2;
    v1  = _mm256_hadd_epi32(in, in);       // horizontal add within lanes
    v2  = _mm256_hadd_epi32(v1, v1);
    u1  = _mm256_castsi256_si128(v2);      // low 128 bits
    u2  = _mm256_extracti128_si256(v2, 1); // high 128 bits
    ret = _mm_add_epi32(u1, u2);
#else
    __m128i u1,u2,u3;
    u1  = _mm256_extractf128_si256(in, 0);
    u2  = _mm256_extractf128_si256(in, 1);
    u3  = _mm_add_epi32(u1, u2);
    u1  = _mm_hadd_epi32(u3, u3);          // SSSE3 horizontal add
    ret = _mm_hadd_epi32(u1, u1);
#endif
    return _mm_cvtsi128_si32(ret);
    // v_prefetch0: walk the block in 64-byte (cache line) steps, pulling
    // data 4096 bytes ahead into L2 and 512 bytes ahead into L1.
    for(int i=0;i<size;i+=64){
      _mm_prefetch(ptr+i+4096,_MM_HINT_T1);
      _mm_prefetch(ptr+i+512,_MM_HINT_T0);
    }
    // prefetch_HINT_T0: single-line prefetch into L1.
    _mm_prefetch(ptr, _MM_HINT_T0);
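// Hedged usage sketch, not in the original source (buf and bytes are
// hypothetical): warming a lattice buffer ahead of a stencil sweep.
//   v_prefetch0(bytes, (const char *)buf);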
// Map the Optimization kernels and vector types onto the generic Grid
// SIMD interface.
typedef Optimization::vech SIMD_Htype; // half-precision type
typedef Optimization::vecf SIMD_Ftype; // single-precision type
typedef Optimization::vecd SIMD_Dtype; // double-precision type
typedef Optimization::veci SIMD_Itype; // integer type

typedef Optimization::Vsplat  VsplatSIMD;
typedef Optimization::Vstore  VstoreSIMD;
typedef Optimization::Vset    VsetSIMD;
typedef Optimization::Vstream VstreamSIMD;

template <typename S, typename T>
using ReduceSIMD = Optimization::Reduce<S, T>;

typedef Optimization::Sum          SumSIMD;
typedef Optimization::Sub          SubSIMD;
typedef Optimization::Div          DivSIMD;
typedef Optimization::Mult         MultSIMD;
typedef Optimization::MultComplex  MultComplexSIMD;
typedef Optimization::MultRealPart MultRealPartSIMD;
typedef Optimization::MaddRealPart MaddRealPartSIMD;
typedef Optimization::Conj         ConjSIMD;
typedef Optimization::TimesMinusI  TimesMinusISIMD;
typedef Optimization::TimesI       TimesISIMD;