#ifndef SSE2NEON_H
#define SSE2NEON_H
// This header file provides a simple API translation layer
// between SSE intrinsics to their corresponding ARM NEON versions
//
// This header file does not (yet) translate *all* of the SSE intrinsics.
// Since this is in support of a specific porting effort, I have only
// included the intrinsics I needed to get my port to work.
//
// Questions/Comments/Feedback send to: [email protected]
//
// If you want to improve or add to this project, send me an
// email and I will probably approve your access to the depot.
//
// Project is located here:
//
// https://github.com/jratcliff63367/sse2neon
//
// Show your appreciation for open source by sending me a bitcoin tip to the following
// address.
//
// TipJar: 1PzgWDSyq4pmdAXRH8SPUtta4SWGrt4B1p :
// https://blockchain.info/address/1PzgWDSyq4pmdAXRH8SPUtta4SWGrt4B1p
//
//
// Contributors to this project are:
//
// John W. Ratcliff : [email protected]
// Brandon Rowlett : [email protected]
// Ken Fast : [email protected]
// Eric van Beurden : [email protected]
// Alexander Potylitsin : [email protected]
//
//
// *********************************************************************************************************************
// apoty: March 17, 2017
// Most of the current version was changed to fix existing and potential issues.
// All unit tests were rewritten as part of the forge lib project to cover all implemented functions.
// *********************************************************************************************************************
// Release notes for January 20, 2017 version:
//
// The unit tests have been refactored. They no longer assert on an error; instead, they return a pass/fail condition
// The unit-tests now test 10,000 random float and int values against each intrinsic.
//
// SSE2NEON now supports 95 SSE intrinsics. 39 of them have formal unit tests which have been implemented and
// fully tested on NEON/ARM. The remaining 56 still need unit tests implemented.
//
// A struct is now defined in this header file called 'SIMDVec' which can be used by applications which
// attempt to access the contents of an __m128 struct directly. It is important to note that Microsoft considers
// accessing the __m128 struct directly to be bad coding practice: @see: https://msdn.microsoft.com/en-us/library/ayeb3ayc.aspx
//
// However, some legacy source code may try to access the contents of an __m128 struct directly so the developer
// can use the SIMDVec as an alias for it. Any casting must be done manually by the developer, as you cannot
// cast or otherwise alias the base NEON data type for intrinsic operations.
//
// A bug was found with the _mm_shuffle_ps intrinsic. If the shuffle permutation was not one of the ones with
// a custom/unique implementation, causing it to fall through to the default shuffle implementation, it failed
// to return the correct value. This is now fixed.
//
// A bug was found with the _mm_cvtps_epi32 intrinsic. This converts floating point values to integers.
// It was not honoring the correct rounding mode. In SSE the default rounding mode when converting from float to int
// is to use 'round to even' otherwise known as 'bankers rounding'. ARMv7 did not support this feature but ARMv8 does.
// As it stands today, this header file assumes ARMv8. If you are trying to target really old ARM devices, you may get
// a build error.
//
// Support for a number of new intrinsics was added; however, none of them yet have unit tests to 100% confirm they are
// producing the correct results on NEON. These unit tests will be added as soon as possible.
//
// Here is the list of new intrinsics which have been added:
//
// _mm_cvtss_f32 : extracts the lower order floating point value from the parameter
// _mm_add_ss : adds the scalar single-precision floating point values of a and b
// _mm_div_ps : Divides the four single-precision, floating-point values of a and b.
// _mm_div_ss : Divides the scalar single-precision floating point value of a by b.
// _mm_sqrt_ss : Computes the approximation of the square root of the scalar single-precision floating point value of in.
// _mm_rsqrt_ps : Computes the approximations of the reciprocal square roots of the four single-precision floating point values of in.
// _mm_comilt_ss : Compares the lower single-precision floating point scalar values of a and b using a less-than operation.
// _mm_comigt_ss : Compares the lower single-precision floating point scalar values of a and b using a greater-than operation.
// _mm_comile_ss : Compares the lower single-precision floating point scalar values of a and b using a less-than-or-equal operation.
// _mm_comige_ss : Compares the lower single-precision floating point scalar values of a and b using a greater-than-or-equal operation.
// _mm_comieq_ss : Compares the lower single-precision floating point scalar values of a and b using an equality operation.
// _mm_comineq_ss : Compares the lower single-precision floating point scalar values of a and b using an inequality operation.
// _mm_unpackhi_epi8 : Interleaves the upper 8 signed or unsigned 8-bit integers in a with the upper 8 signed or unsigned 8-bit integers in b.
// _mm_unpackhi_epi16 : Interleaves the upper 4 signed or unsigned 16-bit integers in a with the upper 4 signed or unsigned 16-bit integers in b.
//
// *********************************************************************************************************************
/*
** The MIT license:
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and associated documentation files (the "Software"), to deal
** in the Software without restriction, including without limitation the rights
** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
** copies of the Software, and to permit persons to whom the Software is furnished
** to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in all
** copies or substantial portions of the Software.
** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#define ENABLE_CPP_VERSION 0
#if defined(__GNUC__) || defined(__clang__)
# pragma push_macro("FORCE_INLINE")
# pragma push_macro("ALIGN_STRUCT")
# define FORCE_INLINE static inline __attribute__((always_inline))
# define ALIGN_STRUCT(x) __attribute__((aligned(x)))
#else
# error "Macro name collisions may happen with unknown compiler"
# define FORCE_INLINE static inline
# define ALIGN_STRUCT(x) __declspec(align(x))
#endif
#include <stdint.h>
#include "arm_neon.h"
/*******************************************************/
/* MACRO for shuffle parameter for _mm_shuffle_ps(). */
/* Argument fp3 is a digit[0123] that represents the fp*/
/* from argument "b" of mm_shuffle_ps that will be */
/* placed in fp3 of result. fp2 is the same for fp2 in */
/* result. fp1 is a digit[0123] that represents the fp */
/* from argument "a" of mm_shuffle_ps that will be */
/* placed in fp1 of result. fp0 is the same for fp0 of */
/* result */
/*******************************************************/
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
(((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
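// Illustrative worked example (added commentary, not from the original header):
// _MM_SHUFFLE(3, 2, 1, 0) == (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4, so
// _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0)) selects a[0], a[1], b[2], b[3]
// (fp0/fp1 index into argument "a", fp2/fp3 index into argument "b").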
/* indicate immediate constant argument in a given range */
#define __constrange(a,b) \
const
typedef float32x4_t __m128;
typedef int32x4_t __m128i;
// ******************************************
// type-safe casting between types
// ******************************************
#define vreinterpretq_m128_f16(x) \
vreinterpretq_f32_f16(x)
#define vreinterpretq_m128_f32(x) \
(x)
#define vreinterpretq_m128_f64(x) \
vreinterpretq_f32_f64(x)
#define vreinterpretq_m128_u8(x) \
vreinterpretq_f32_u8(x)
#define vreinterpretq_m128_u16(x) \
vreinterpretq_f32_u16(x)
#define vreinterpretq_m128_u32(x) \
vreinterpretq_f32_u32(x)
#define vreinterpretq_m128_u64(x) \
vreinterpretq_f32_u64(x)
#define vreinterpretq_m128_s8(x) \
vreinterpretq_f32_s8(x)
#define vreinterpretq_m128_s16(x) \
vreinterpretq_f32_s16(x)
#define vreinterpretq_m128_s32(x) \
vreinterpretq_f32_s32(x)
#define vreinterpretq_m128_s64(x) \
vreinterpretq_f32_s64(x)
#define vreinterpretq_f16_m128(x) \
vreinterpretq_f16_f32(x)
#define vreinterpretq_f32_m128(x) \
(x)
#define vreinterpretq_f64_m128(x) \
vreinterpretq_f64_f32(x)
#define vreinterpretq_u8_m128(x) \
vreinterpretq_u8_f32(x)
#define vreinterpretq_u16_m128(x) \
vreinterpretq_u16_f32(x)
#define vreinterpretq_u32_m128(x) \
vreinterpretq_u32_f32(x)
#define vreinterpretq_u64_m128(x) \
vreinterpretq_u64_f32(x)
#define vreinterpretq_s8_m128(x) \
vreinterpretq_s8_f32(x)
#define vreinterpretq_s16_m128(x) \
vreinterpretq_s16_f32(x)
#define vreinterpretq_s32_m128(x) \
vreinterpretq_s32_f32(x)
#define vreinterpretq_s64_m128(x) \
vreinterpretq_s64_f32(x)
#define vreinterpretq_m128i_s8(x) \
vreinterpretq_s32_s8(x)
#define vreinterpretq_m128i_s16(x) \
vreinterpretq_s32_s16(x)
#define vreinterpretq_m128i_s32(x) \
(x)
#define vreinterpretq_m128i_s64(x) \
vreinterpretq_s32_s64(x)
#define vreinterpretq_m128i_u8(x) \
vreinterpretq_s32_u8(x)
#define vreinterpretq_m128i_u16(x) \
vreinterpretq_s32_u16(x)
#define vreinterpretq_m128i_u32(x) \
vreinterpretq_s32_u32(x)
#define vreinterpretq_m128i_u64(x) \
vreinterpretq_s32_u64(x)
#define vreinterpretq_s8_m128i(x) \
vreinterpretq_s8_s32(x)
#define vreinterpretq_s16_m128i(x) \
vreinterpretq_s16_s32(x)
#define vreinterpretq_s32_m128i(x) \
(x)
#define vreinterpretq_s64_m128i(x) \
vreinterpretq_s64_s32(x)
#define vreinterpretq_u8_m128i(x) \
vreinterpretq_u8_s32(x)
#define vreinterpretq_u16_m128i(x) \
vreinterpretq_u16_s32(x)
#define vreinterpretq_u32_m128i(x) \
vreinterpretq_u32_s32(x)
#define vreinterpretq_u64_m128i(x) \
vreinterpretq_u64_s32(x)
// union intended to allow direct access to an __m128 variable using the names that the MSVC
// compiler provides. This union should really only be used when trying to access the members
// of the vector as integer values. GCC/clang allow native access to the float members through
// a simple array access operator (in C since 4.6, in C++ since 4.8).
//
// Ideally direct accesses to SIMD vectors should not be used since it can cause a performance
// hit. If it really is needed however, the original __m128 variable can be aliased with a
// pointer to this union and used to access individual components. The use of this union should
// be hidden behind a macro that is used throughout the codebase to access the members instead
// of always declaring this type of variable.
typedef union ALIGN_STRUCT(16) SIMDVec
{
float m128_f32[4]; // as floats - do not use this. Added for convenience.
int8_t m128_i8[16]; // as signed 8-bit integers.
int16_t m128_i16[8]; // as signed 16-bit integers.
int32_t m128_i32[4]; // as signed 32-bit integers.
int64_t m128_i64[2]; // as signed 64-bit integers.
uint8_t m128_u8[16]; // as unsigned 8-bit integers.
uint16_t m128_u16[8]; // as unsigned 16-bit integers.
uint32_t m128_u32[4]; // as unsigned 32-bit integers.
uint64_t m128_u64[2]; // as unsigned 64-bit integers.
} SIMDVec;
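// Illustrative sketch (added; not part of the original API): reading one 32-bit
// lane of an __m128 through the SIMDVec union described above. The helper name
// "sse2neon_example_extract_u32" is hypothetical and only demonstrates the
// aliasing pattern; real code would hide this behind a project-wide macro.
FORCE_INLINE uint32_t sse2neon_example_extract_u32(const __m128 *v, int lane)
{
    const SIMDVec *s = (const SIMDVec *)v; // alias the vector through the union
    return s->m128_u32[lane & 3];          // fetch the requested lane as an unsigned 32-bit value
}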
// ******************************************
// Set/get methods
// ******************************************
// extracts the lower order floating point value from the parameter : https://msdn.microsoft.com/en-us/library/bb514059%28v=vs.120%29.aspx?f=255&MSPPError=-2147217396
FORCE_INLINE float _mm_cvtss_f32(__m128 a)
{
return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
}
// Sets the 128-bit value to zero https://msdn.microsoft.com/en-us/library/vstudio/ys7dw0kh(v=vs.100).aspx
FORCE_INLINE __m128i _mm_setzero_si128()
{
return vreinterpretq_m128i_s32(vdupq_n_s32(0));
}
// Clears the four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/tk1t2tbz(v=vs.100).aspx
FORCE_INLINE __m128 _mm_setzero_ps(void)
{
return vreinterpretq_m128_f32(vdupq_n_f32(0));
}
// Sets the four single-precision, floating-point values to w. https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set1_ps(float _w)
{
return vreinterpretq_m128_f32(vdupq_n_f32(_w));
}
// Sets the four single-precision, floating-point values to w. https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set_ps1(float _w)
{
return vreinterpretq_m128_f32(vdupq_n_f32(_w));
}
// Sets the four single-precision, floating-point values to the four inputs. https://msdn.microsoft.com/en-us/library/vstudio/afh0zf75(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
{
float __attribute__((aligned(16))) data[4] = { x, y, z, w };
return vreinterpretq_m128_f32(vld1q_f32(data));
}
// Sets the four single-precision, floating-point values to the four inputs in reverse order. https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx
FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x)
{
float __attribute__ ((aligned (16))) data[4] = { w, z, y, x };
return vreinterpretq_m128_f32(vld1q_f32(data));
}
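// Note (added for clarity; not from the original header): with the definitions
// above, the last argument of _mm_set_ps becomes lane 0 while the first argument
// of _mm_setr_ps becomes lane 0, e.g.
//   _mm_set_ps (3.f, 2.f, 1.f, 0.f) -> lanes { 0.f, 1.f, 2.f, 3.f }
//   _mm_setr_ps(3.f, 2.f, 1.f, 0.f) -> lanes { 3.f, 2.f, 1.f, 0.f }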
//added by hasindu
//Sets the 4 signed 32-bit integer values in reverse order https://technet.microsoft.com/en-us/library/security/27yb3ee5(v=vs.90).aspx
FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0)
{
int32_t __attribute__((aligned(16))) data[4] = { i3, i2, i1, i0 };
return vreinterpretq_m128i_s32(vld1q_s32(data));
}
//following added by hasindu
//Sets the 16 signed 8-bit integer values to b. https://msdn.microsoft.com/en-us/library/6e14xhyf(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set1_epi8(char w)
{
return vreinterpretq_m128i_s8(vdupq_n_s8(w));
}
//following added by hasindu
//Sets the 8 signed 16-bit integer values to w. https://msdn.microsoft.com/en-us/library/k0ya3x0e(v=vs.90).aspx
FORCE_INLINE __m128i _mm_set1_epi16(short w)
{
return vreinterpretq_m128i_s16(vdupq_n_s16(w));
}
//following added by hasindu
//Sets the 8 signed 16-bit integer values. https://msdn.microsoft.com/en-au/library/3e0fek84(v=vs.90).aspx
FORCE_INLINE __m128i _mm_set_epi16(short i7, short i6, short i5, short i4, short i3, short i2, short i1, short i0)
{
int16_t __attribute__((aligned(16))) data[8] = { i0, i1, i2, i3, i4, i5, i6, i7 };
return vreinterpretq_m128i_s16(vld1q_s16(data));
}
// Sets the 4 signed 32-bit integer values to i. https://msdn.microsoft.com/en-us/library/vstudio/h4xscxat(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set1_epi32(int _i)
{
return vreinterpretq_m128i_s32(vdupq_n_s32(_i));
}
// Sets the 4 signed 32-bit integer values. https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
{
int32_t __attribute__((aligned(16))) data[4] = { i0, i1, i2, i3 };
return vreinterpretq_m128i_s32(vld1q_s32(data));
}
// Stores four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/s3h4ay6y(v=vs.100).aspx
FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
{
vst1q_f32(p, vreinterpretq_f32_m128(a));
}
// Stores four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/44e30x22(v=vs.100).aspx
FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
{
vst1q_f32(p, vreinterpretq_f32_m128(a));
}
// Stores four 32-bit integer values (as a __m128i value) at the address p. https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a)
{
vst1q_s32((int32_t*) p, vreinterpretq_s32_m128i(a));
}
//added by hasindu (verify this for requirement of alignment)
// Stores four 32-bit integer values (as a __m128i value) at the address p. https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
{
vst1q_s32((int32_t*) p, vreinterpretq_s32_m128i(a));
}
// Stores the lower single-precision, floating-point value. https://msdn.microsoft.com/en-us/library/tzz10fbx(v=vs.100).aspx
FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
{
vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0);
}
// Reads the lower 64 bits of b and stores them into the lower 64 bits of a. https://msdn.microsoft.com/en-us/library/hhwf428f%28v=vs.90%29.aspx
FORCE_INLINE void _mm_storel_epi64(__m128i* a, __m128i b)
{
uint64x1_t hi = vget_high_u64(vreinterpretq_u64_m128i(*a));
uint64x1_t lo = vget_low_u64(vreinterpretq_u64_m128i(b));
*a = vreinterpretq_m128i_u64(vcombine_u64(lo, hi));
}
// Loads a single single-precision, floating-point value, copying it into all four words https://msdn.microsoft.com/en-us/library/vstudio/5cdkf716(v=vs.100).aspx
FORCE_INLINE __m128 _mm_load1_ps(const float * p)
{
return vreinterpretq_m128_f32(vld1q_dup_f32(p));
}
// Loads four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/zzd50xxt(v=vs.100).aspx
FORCE_INLINE __m128 _mm_load_ps(const float * p)
{
return vreinterpretq_m128_f32(vld1q_f32(p));
}
// Loads four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/x1b16s7z%28v=vs.90%29.aspx
FORCE_INLINE __m128 _mm_loadu_ps(const float * p)
{
// for NEON, alignment doesn't matter, so _mm_load_ps and _mm_loadu_ps are equivalent
return vreinterpretq_m128_f32(vld1q_f32(p));
}
// Loads a single-precision, floating-point value into the low word and clears the upper three words. https://msdn.microsoft.com/en-us/library/548bb9h4%28v=vs.90%29.aspx
FORCE_INLINE __m128 _mm_load_ss(const float * p)
{
return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0));
}
// ******************************************
// Logic/Binary operations
// ******************************************
// Compares for inequality. https://msdn.microsoft.com/en-us/library/sf44thbx(v=vs.100).aspx
FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
{
return vreinterpretq_m128_u32( vmvnq_u32( vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)) ) );
}
// Computes the bitwise AND-NOT of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/68h7wd02(v=vs.100).aspx
FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
{
return vreinterpretq_m128_s32( vbicq_s32(vreinterpretq_s32_m128(b), vreinterpretq_s32_m128(a)) ); // *NOTE* argument swap
}
// Computes the bitwise AND of the 128-bit value in b and the bitwise NOT of the 128-bit value in a. https://msdn.microsoft.com/en-us/library/vstudio/1beaceh8(v=vs.100).aspx
FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
{
return vreinterpretq_m128i_s32( vbicq_s32(vreinterpretq_s32_m128i(b), vreinterpretq_s32_m128i(a)) ); // *NOTE* argument swap
}
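// Note (added for clarity): _mm_andnot_ps/_mm_andnot_si128 compute (~a) & b,
// while vbicq computes (first & ~second); hence the swapped arguments above.
// For example, with a = 0x0F0F0F0F and b = 0xFFFFFFFF in every lane,
// _mm_andnot_si128(a, b) yields 0xF0F0F0F0 in every lane.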
// Computes the bitwise AND of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/vstudio/6d1txsa8(v=vs.100).aspx
FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
{
return vreinterpretq_m128i_s32( vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)) );
}
// Computes the bitwise AND of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/73ck1xc5(v=vs.100).aspx
FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
{
return vreinterpretq_m128_s32( vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)) );
}
// Computes the bitwise OR of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/7ctdsyy0(v=vs.100).aspx
FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
{
return vreinterpretq_m128_s32( vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)) );
}
// Computes bitwise EXOR (exclusive-or) of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/ss6k3wk8(v=vs.100).aspx
FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
{
return vreinterpretq_m128_s32( veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)) );
}
// Computes the bitwise OR of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/vstudio/ew8ty0db(v=vs.100).aspx
FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
{
return vreinterpretq_m128i_s32( vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)) );
}
// Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/fzt08www(v=vs.100).aspx
FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
{
return vreinterpretq_m128i_s32( veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)) );
}
// NEON does not provide this method
// Creates a 4-bit mask from the most significant bits of the four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/4490ys29(v=vs.100).aspx
FORCE_INLINE int _mm_movemask_ps(__m128 a)
{
#if ENABLE_CPP_VERSION // I am not yet convinced that the NEON version is faster than the C version of this
uint32x4_t &ia = *(uint32x4_t *)&a;
return (ia[0] >> 31) | ((ia[1] >> 30) & 2) | ((ia[2] >> 29) & 4) | ((ia[3] >> 28) & 8);
#else
static const uint32x4_t movemask = { 1, 2, 4, 8 };
static const uint32x4_t highbit = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
uint32x4_t t0 = vreinterpretq_u32_m128(a);
uint32x4_t t1 = vtstq_u32(t0, highbit);
uint32x4_t t2 = vandq_u32(t1, movemask);
uint32x2_t t3 = vorr_u32(vget_low_u32(t2), vget_high_u32(t2));
return vget_lane_u32(t3, 0) | vget_lane_u32(t3, 1);
#endif
}
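// Illustrative sketch (added; the helper name is hypothetical): _mm_movemask_ps
// packs the four sign bits into the low four bits of the result.
FORCE_INLINE int sse2neon_example_movemask(void)
{
    __m128 v = _mm_set_ps(4.0f, -3.0f, 2.0f, -1.0f); // lanes { -1.0f, 2.0f, -3.0f, 4.0f }
    return _mm_movemask_ps(v);                       // lanes 0 and 2 are negative -> 0b0101 == 5
}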
// Takes the upper 64 bits of a and places it in the low end of the result
// Takes the lower 64 bits of b and places it into the high end of the result.
FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
{
float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
return vreinterpretq_m128_f32(vcombine_f32(a32, b10));
}
// takes the lower two 32-bit values from a, swaps them and places them in the low end of the result
// takes the higher two 32-bit values from b, swaps them and places them in the high end of the result.
FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
{
float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b)));
return vreinterpretq_m128_f32(vcombine_f32(a01, b23));
}
FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
{
float32x2_t a21 = vget_high_f32(vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
float32x2_t b03 = vget_low_f32(vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
{
float32x2_t a03 = vget_low_f32(vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
float32x2_t b21 = vget_high_f32(vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
}
FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
{
float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
}
FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
{
float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
}
FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
{
float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b)));
return vreinterpretq_m128_f32(vcombine_f32(a01, b01));
}
// keeps the low 64 bits of a in the low end and puts the high 64 bits of b in the high end
FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
{
float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
return vreinterpretq_m128_f32(vcombine_f32(a10, b32));
}
FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
{
float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1);
float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
return vreinterpretq_m128_f32(vcombine_f32(a11, b00));
}
FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
{
float32x2_t a22 = vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
return vreinterpretq_m128_f32(vcombine_f32(a22, b00));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
{
float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0);
float32x2_t b22 = vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0);
return vreinterpretq_m128_f32(vcombine_f32(a00, b22));
}
FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
{
float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
float32x2_t a22 = vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* apoty: TODO: use vzip ?*/
float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
return vreinterpretq_m128_f32(vcombine_f32(a02, b32));
}
FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
{
float32x2_t a33 = vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1);
float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1);
return vreinterpretq_m128_f32(vcombine_f32(a33, b11));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
{
float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
float32x2_t b20 = vset_lane_f32(b2, b00, 1);
return vreinterpretq_m128_f32(vcombine_f32(a10, b20));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
{
float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
float32x2_t b20 = vset_lane_f32(b2, b00, 1);
return vreinterpretq_m128_f32(vcombine_f32(a01, b20));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
{
float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
float32x2_t b20 = vset_lane_f32(b2, b00, 1);
return vreinterpretq_m128_f32(vcombine_f32(a32, b20));
}
// NEON does not support a general purpose permute intrinsic
// Currently I am not sure whether the C implementation is faster or slower than the NEON version.
// Note, this has to be expanded as a macro because the shuffle value must be an immediate value.
// The same is true on SSE as well.
// Selects four specific single-precision, floating-point values from a and b, based on the mask i. https://msdn.microsoft.com/en-us/library/vstudio/5f0858x0(v=vs.100).aspx
#if ENABLE_CPP_VERSION // I am not convinced that the NEON version is faster than the C version yet.
FORCE_INLINE __m128 _mm_shuffle_ps_default(__m128 a, __m128 b, __constrange(0,255) int imm)
{
__m128 ret;
ret[0] = a[imm & 0x3];
ret[1] = a[(imm >> 2) & 0x3];
ret[2] = b[(imm >> 4) & 0x03];
ret[3] = b[(imm >> 6) & 0x03];
return ret;
}
#else
#define _mm_shuffle_ps_default(a, b, imm) \
({ \
float32x4_t ret; \
ret = vmovq_n_f32(vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & 0x3)); \
ret = vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), ret, 1); \
ret = vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), ret, 2); \
ret = vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), ret, 3); \
vreinterpretq_m128_f32(ret); \
})
#endif
//FORCE_INLINE __m128 _mm_shuffle_ps(__m128 a, __m128 b, __constrange(0,255) int imm)
#define _mm_shuffle_ps(a, b, imm) \
({ \
__m128 ret; \
switch (imm) \
{ \
case _MM_SHUFFLE(1, 0, 3, 2): ret = _mm_shuffle_ps_1032((a), (b)); break; \
case _MM_SHUFFLE(2, 3, 0, 1): ret = _mm_shuffle_ps_2301((a), (b)); break; \
case _MM_SHUFFLE(0, 3, 2, 1): ret = _mm_shuffle_ps_0321((a), (b)); break; \
case _MM_SHUFFLE(2, 1, 0, 3): ret = _mm_shuffle_ps_2103((a), (b)); break; \
case _MM_SHUFFLE(1, 0, 1, 0): ret = _mm_shuffle_ps_1010((a), (b)); break; \
case _MM_SHUFFLE(1, 0, 0, 1): ret = _mm_shuffle_ps_1001((a), (b)); break; \
case _MM_SHUFFLE(0, 1, 0, 1): ret = _mm_shuffle_ps_0101((a), (b)); break; \
case _MM_SHUFFLE(3, 2, 1, 0): ret = _mm_shuffle_ps_3210((a), (b)); break; \
case _MM_SHUFFLE(0, 0, 1, 1): ret = _mm_shuffle_ps_0011((a), (b)); break; \
case _MM_SHUFFLE(0, 0, 2, 2): ret = _mm_shuffle_ps_0022((a), (b)); break; \
case _MM_SHUFFLE(2, 2, 0, 0): ret = _mm_shuffle_ps_2200((a), (b)); break; \
case _MM_SHUFFLE(3, 2, 0, 2): ret = _mm_shuffle_ps_3202((a), (b)); break; \
case _MM_SHUFFLE(1, 1, 3, 3): ret = _mm_shuffle_ps_1133((a), (b)); break; \
case _MM_SHUFFLE(2, 0, 1, 0): ret = _mm_shuffle_ps_2010((a), (b)); break; \
case _MM_SHUFFLE(2, 0, 0, 1): ret = _mm_shuffle_ps_2001((a), (b)); break; \
case _MM_SHUFFLE(2, 0, 3, 2): ret = _mm_shuffle_ps_2032((a), (b)); break; \
default: ret = _mm_shuffle_ps_default((a), (b), (imm)); break; \
} \
ret; \
})
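// Illustrative sketch (added; the helper name is hypothetical): with
// a = { a0, a1, a2, a3 } and b = { b0, b1, b2, b3 }, this returns
// { a0, a1, b2, b3 }, taking the low half from a and the high half from b.
FORCE_INLINE __m128 sse2neon_example_shuffle_ps(__m128 a, __m128 b)
{
    return _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0)); // hits the _mm_shuffle_ps_3210 case above
}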
// Takes the upper 64 bits of a and places it in the low end of the result
// Takes the lower 64 bits of a and places it into the high end of the result.
FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a)
{
int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
return vreinterpretq_m128i_s32(vcombine_s32(a32, a10));
}
// takes the lower two 32-bit values from a and swaps them and places in low end of result
// takes the higher two 32 bit values from a and swaps them and places in high end of result.
FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a)
{
int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a)));
return vreinterpretq_m128i_s32(vcombine_s32(a01, a23));
}
// rotates the least significant 32 bits into the most significant 32 bits, and shifts the rest down
FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a)
{
return vreinterpretq_m128i_s32(vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1));
}
// rotates the most significant 32 bits into the least significant 32 bits, and shifts the rest up
FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a)
{
return vreinterpretq_m128i_s32(vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3));
}
// gets the lower 64 bits of a, and places it in the upper 64 bits
// gets the lower 64 bits of a and places it in the lower 64 bits
FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a)
{
int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
return vreinterpretq_m128i_s32(vcombine_s32(a10, a10));
}
// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places it in the lower 64 bits
// gets the lower 64 bits of a, and places it in the upper 64 bits
FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a)
{
int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
return vreinterpretq_m128i_s32(vcombine_s32(a01, a10));
}
// gets the lower 64 bits of a, swaps the 0 and 1 elements and places it in the upper 64 bits
// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places it in the lower 64 bits
FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a)
{
int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
return vreinterpretq_m128i_s32(vcombine_s32(a01, a01));
}
FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a)
{
int32x2_t a11 = vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1);
int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
return vreinterpretq_m128i_s32(vcombine_s32(a11, a22));
}
FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a)
{
int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
return vreinterpretq_m128i_s32(vcombine_s32(a22, a01));
}
FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a)
{
int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
int32x2_t a33 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1);
return vreinterpretq_m128i_s32(vcombine_s32(a32, a33));
}
//FORCE_INLINE __m128i _mm_shuffle_epi32_default(__m128i a, __constrange(0,255) int imm)
#if ENABLE_CPP_VERSION
FORCE_INLINE __m128i _mm_shuffle_epi32_default(__m128i a, __constrange(0,255) int imm)
{
__m128i ret;
ret[0] = a[imm & 0x3];
ret[1] = a[(imm >> 2) & 0x3];
ret[2] = a[(imm >> 4) & 0x03];
ret[3] = a[(imm >> 6) & 0x03];
return ret;
}
#else
#define _mm_shuffle_epi32_default(a, imm) \
({ \
int32x4_t ret; \
ret = vmovq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm) & 0x3)); \
ret = vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 2) & 0x3), ret, 1); \
ret = vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), ret, 2); \
ret = vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), ret, 3); \
vreinterpretq_m128i_s32(ret); \
})
#endif
//FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a, __constrange(0,255) int imm)
#if defined(__aarch64__)
#define _mm_shuffle_epi32_splat(a, imm) \
({ \
vreinterpretq_m128i_s32(vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm))); \
})
#else
#define _mm_shuffle_epi32_splat(a, imm) \
({ \
vreinterpretq_m128i_s32(vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)))); \
})
#endif
// Shuffles the 4 signed or unsigned 32-bit integers in a as specified by imm. https://msdn.microsoft.com/en-us/library/56f67xbk%28v=vs.90%29.aspx
//FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a, __constrange(0,255) int imm)
#define _mm_shuffle_epi32(a, imm) \
({ \
__m128i ret; \
switch (imm) \
{ \
case _MM_SHUFFLE(1, 0, 3, 2): ret = _mm_shuffle_epi_1032((a)); break; \
case _MM_SHUFFLE(2, 3, 0, 1): ret = _mm_shuffle_epi_2301((a)); break; \
case _MM_SHUFFLE(0, 3, 2, 1): ret = _mm_shuffle_epi_0321((a)); break; \
case _MM_SHUFFLE(2, 1, 0, 3): ret = _mm_shuffle_epi_2103((a)); break; \
case _MM_SHUFFLE(1, 0, 1, 0): ret = _mm_shuffle_epi_1010((a)); break; \
case _MM_SHUFFLE(1, 0, 0, 1): ret = _mm_shuffle_epi_1001((a)); break; \
case _MM_SHUFFLE(0, 1, 0, 1): ret = _mm_shuffle_epi_0101((a)); break; \
case _MM_SHUFFLE(2, 2, 1, 1): ret = _mm_shuffle_epi_2211((a)); break; \
case _MM_SHUFFLE(0, 1, 2, 2): ret = _mm_shuffle_epi_0122((a)); break; \
case _MM_SHUFFLE(3, 3, 3, 2): ret = _mm_shuffle_epi_3332((a)); break; \
case _MM_SHUFFLE(0, 0, 0, 0): ret = _mm_shuffle_epi32_splat((a),0); break; \
case _MM_SHUFFLE(1, 1, 1, 1): ret = _mm_shuffle_epi32_splat((a),1); break; \
case _MM_SHUFFLE(2, 2, 2, 2): ret = _mm_shuffle_epi32_splat((a),2); break; \
case _MM_SHUFFLE(3, 3, 3, 3): ret = _mm_shuffle_epi32_splat((a),3); break; \
default: ret = _mm_shuffle_epi32_default((a), (imm)); break; \
} \
ret; \
})
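// Illustrative sketch (added; the helper name is hypothetical): broadcasting lane 0
// of an integer vector, which resolves to the _mm_shuffle_epi32_splat fast path above.
FORCE_INLINE __m128i sse2neon_example_broadcast_lane0(__m128i a)
{
    return _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 0, 0)); // every lane becomes a[0]
}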
// Shuffles the upper 4 signed or unsigned 16-bit integers in a as specified by imm. https://msdn.microsoft.com/en-us/library/13ywktbs(v=vs.100).aspx
//FORCE_INLINE __m128i _mm_shufflehi_epi16_function(__m128i a, __constrange(0,255) int imm)
#define _mm_shufflehi_epi16_function(a, imm) \
({ \
int16x8_t ret = vreinterpretq_s16_s32(a); \
int16x4_t highBits = vget_high_s16(ret); \
ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & 0x3), ret, 4); \
ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, 5); \
ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, 6); \
ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, 7); \
vreinterpretq_s32_s16(ret); \
})
//FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a, __constrange(0,255) int imm)
#define _mm_shufflehi_epi16(a, imm) \
_mm_shufflehi_epi16_function((a), (imm))
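// Note (added for clarity): the lower four 16-bit lanes pass through unchanged;
// only lanes 4..7 are permuted by imm. For example,
// _mm_shufflehi_epi16(a, _MM_SHUFFLE(0, 1, 2, 3)) reverses the order of lanes 4..7.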
//added by hasindu
//Shifts the 8 signed or unsigned 16-bit integers in a left by count bits while shifting in zeros. https://msdn.microsoft.com/en-us/library/es73bcsy(v=vs.90).aspx
#define _mm_slli_epi16(a, imm) \
({ \
__m128i ret; \
if ((imm) <= 0) {\
ret = a; \
} \
else if ((imm) > 15) { \
ret = _mm_setzero_si128(); \
} \
else { \
ret = vreinterpretq_m128i_s16(vshlq_n_s16(vreinterpretq_s16_m128i(a), (imm))); \
} \
ret; \
})
// Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while shifting in zeros. : https://msdn.microsoft.com/en-us/library/z2k3bbtb%28v=vs.90%29.aspx
//FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, __constrange(0,255) int imm)
#define _mm_slli_epi32(a, imm) \
({ \
__m128i ret; \
if ((imm) <= 0) {\
ret = a; \
} \
else if ((imm) > 31) { \
ret = _mm_setzero_si128(); \
} \
else { \
ret = vreinterpretq_m128i_s32(vshlq_n_s32(vreinterpretq_s32_m128i(a), (imm))); \
} \
ret; \
})
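// Illustrative sketch (added; the helper name is hypothetical): a left shift by 4
// multiplies each signed 32-bit lane by 16.
FORCE_INLINE __m128i sse2neon_example_times16(__m128i a)
{
    return _mm_slli_epi32(a, 4); // each 32-bit lane shifted left by 4 bits
}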
//added by hasindu
// Shifts the 8 signed or unsigned 16-bit integers in a right by count bits while shifting in zeros.
//https://msdn.microsoft.com/en-us/library/6tcwd38t(v=vs.90).aspx
#define _mm_srli_epi16(a, imm) \
({ \
__m128i ret; \
if ((imm) <= 0) { \
ret = a; \
} \
else if ((imm) > 15) { \
ret = _mm_setzero_si128(); \
} \
else { \
ret = vreinterpretq_m128i_u16(vshrq_n_u16(vreinterpretq_u16_m128i(a), (imm))); \
} \
ret; \
})
//Shifts the 4 signed or unsigned 32-bit integers in a right by count bits while shifting in zeros. https://msdn.microsoft.com/en-us/library/w486zcfa(v=vs.100).aspx
//FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm)
#define _mm_srli_epi32(a, imm) \
({ \
__m128i ret; \
if ((imm) <= 0) { \
ret = a; \
} \
else if ((imm) > 31) { \
ret = _mm_setzero_si128(); \
} \
else { \
ret = vreinterpretq_m128i_u32(vshrq_n_u32(vreinterpretq_u32_m128i(a), (imm))); \
} \
ret; \
})
// Shifts the 4 signed 32-bit integers in a right by count bits while shifting in the sign bit. https://msdn.microsoft.com/en-us/library/z1939387(v=vs.100).aspx
//FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm)
#define _mm_srai_epi32(a, imm) \
({ \
__m128i ret; \
if ((imm) <= 0) { \
ret = a; \
} \
else if ((imm) > 31) { \
ret = vreinterpretq_m128i_s32(vshrq_n_s32(vreinterpretq_s32_m128i(a), 16)); \
ret = vreinterpretq_m128i_s32(vshrq_n_s32(vreinterpretq_s32_m128i(ret), 16)); \
} \
else { \
ret = vreinterpretq_m128i_s32(vshrq_n_s32(vreinterpretq_s32_m128i(a), (imm))); \
} \
ret; \
})
// Shifts the 128-bit value in a right by imm bytes while shifting in zeros. imm must be an immediate. https://msdn.microsoft.com/en-us/library/305w28yz(v=vs.100).aspx
//FORCE_INLINE _mm_srli_si128(__m128i a, __constrange(0,255) int imm)
#define _mm_srli_si128(a, imm) \
({ \
__m128i ret; \
if ((imm) <= 0) { \
ret = a; \
} \
else if ((imm) > 15) { \
ret = _mm_setzero_si128(); \
} \
else { \
ret = vreinterpretq_m128i_s8(vextq_s8(vreinterpretq_s8_m128i(a), vdupq_n_s8(0), (imm))); \
} \
ret; \
})
// Shifts the 128-bit value in a left by imm bytes while shifting in zeros. imm must be an immediate. https://msdn.microsoft.com/en-us/library/34d3k2kt(v=vs.100).aspx
//FORCE_INLINE __m128i _mm_slli_si128(__m128i a, __constrange(0,255) int imm)
#define _mm_slli_si128(a, imm) \
({ \
__m128i ret; \
if ((imm) <= 0) { \
ret = a; \
} \
else if ((imm) > 15) { \
ret = _mm_setzero_si128(); \
} \
else { \
ret = vreinterpretq_m128i_s8(vextq_s8(vdupq_n_s8(0), vreinterpretq_s8_m128i(a), 16 - (imm))); \
} \
ret; \
})
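// Note (added for clarity): _mm_srli_si128 and _mm_slli_si128 shift the whole
// 128-bit value by bytes, not bits. For example, _mm_srli_si128(a, 4) drops the
// lowest 4 bytes, moves the remaining bytes toward lane 0 and zero-fills the top
// 4 bytes; _mm_slli_si128(a, 4) moves bytes toward the high end and zero-fills the bottom.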
// NEON does not provide a version of this function, here is an article about some ways to repro the results.
// http://stackoverflow.com/questions/11870910/sse-mm-movemask-epi8-equivalent-method-for-arm-neon
// Creates a 16-bit mask from the most significant bits of the 16 signed or unsigned 8-bit integers in a and zero extends the upper bits. https://msdn.microsoft.com/en-us/library/vstudio/s090c8fk(v=vs.100).aspx
FORCE_INLINE int _mm_movemask_epi8(__m128i _a)
{
uint8x16_t input = vreinterpretq_u8_m128i(_a);
static const int8_t __attribute__((aligned(16))) xr[8] = { -7, -6, -5, -4, -3, -2, -1, 0 };