/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <avx512erintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVX512ERINTRIN_H
#define __AVX512ERINTRIN_H

// exp2a23
#define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \
  (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
                                      (__v8df)_mm512_setzero_pd(), \
                                      (__mmask8)-1, (int)(R)); })

#define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \
  (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
                                      (__v8df)(__m512d)(S), (__mmask8)(M), \
                                      (int)(R)); })

#define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \
  (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
                                      (__v8df)_mm512_setzero_pd(), \
                                      (__mmask8)(M), (int)(R)); })

#define _mm512_exp2a23_pd(A) \
  _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_mask_exp2a23_pd(S, M, A) \
  _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_maskz_exp2a23_pd(M, A) \
  _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \
  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
                                     (__v16sf)_mm512_setzero_ps(), \
                                     (__mmask16)-1, (int)(R)); })

#define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \
  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
                                     (__v16sf)(__m512)(S), (__mmask16)(M), \
                                     (int)(R)); })

#define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \
  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
                                     (__v16sf)_mm512_setzero_ps(), \
                                     (__mmask16)(M), (int)(R)); })

#define _mm512_exp2a23_ps(A) \
  _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_mask_exp2a23_ps(S, M, A) \
  _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_maskz_exp2a23_ps(M, A) \
  _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
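
/*
 * Usage sketch (illustrative, not part of the original header): on a target
 * that supports AVX512ER (e.g. compiled with -mavx512er), _mm512_exp2a23_ps
 * approximates 2^x for each of the 16 float elements with a relative error of
 * at most 2^-23. The masked form keeps elements of S where the corresponding
 * mask bit is 0. The helper name below is hypothetical.
 *
 *   static inline __m512 approx_exp2_masked(__m512 s, __mmask16 m, __m512 x) {
 *     // Elements selected by m get ~2^x; the rest are copied from s.
 *     return _mm512_mask_exp2a23_ps(s, m, x);
 *   }
 */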
// rsqrt28
#define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \
  (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
                                         (__v8df)_mm512_setzero_pd(), \
                                         (__mmask8)-1, (int)(R)); })

#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \
  (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
                                         (__v8df)(__m512d)(S), (__mmask8)(M), \
                                         (int)(R)); })

#define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \
  (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
                                         (__v8df)_mm512_setzero_pd(), \
                                         (__mmask8)(M), (int)(R)); })

#define _mm512_rsqrt28_pd(A) \
  _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_mask_rsqrt28_pd(S, M, A) \
  _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_maskz_rsqrt28_pd(M, A) \
  _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \
  (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
                                        (__v16sf)_mm512_setzero_ps(), \
                                        (__mmask16)-1, (int)(R)); })

#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \
  (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
                                        (__v16sf)(__m512)(S), (__mmask16)(M), \
                                        (int)(R)); })

#define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \
  (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
                                        (__v16sf)_mm512_setzero_ps(), \
                                        (__mmask16)(M), (int)(R)); })

#define _mm512_rsqrt28_ps(A) \
  _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_mask_rsqrt28_ps(S, M, A) \
  _mm512_mask_rsqrt28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_maskz_rsqrt28_ps(M, A) \
  _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \
  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
                                              (__v4sf)(__m128)(B), \
                                              (__v4sf)_mm_setzero_ps(), \
                                              (__mmask8)-1, (int)(R)); })

#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \
  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
                                              (__v4sf)(__m128)(B), \
                                              (__v4sf)(__m128)(S), \
                                              (__mmask8)(M), (int)(R)); })

#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \
  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
                                              (__v4sf)(__m128)(B), \
                                              (__v4sf)_mm_setzero_ps(), \
                                              (__mmask8)(M), (int)(R)); })

#define _mm_rsqrt28_ss(A, B) \
  _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)

#define _mm_mask_rsqrt28_ss(S, M, A, B) \
  _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)

#define _mm_maskz_rsqrt28_ss(M, A, B) \
  _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)

#define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \
  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
                                               (__v2df)(__m128d)(B), \
                                               (__v2df)_mm_setzero_pd(), \
                                               (__mmask8)-1, (int)(R)); })

#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \
  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
                                               (__v2df)(__m128d)(B), \
                                               (__v2df)(__m128d)(S), \
                                               (__mmask8)(M), (int)(R)); })

#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \
  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
                                               (__v2df)(__m128d)(B), \
                                               (__v2df)_mm_setzero_pd(), \
                                               (__mmask8)(M), (int)(R)); })

#define _mm_rsqrt28_sd(A, B) \
  _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)

#define _mm_mask_rsqrt28_sd(S, M, A, B) \
  _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)

#define _mm_maskz_rsqrt28_sd(M, A, B) \
  _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
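
/*
 * Usage sketch (illustrative, not part of the original header): assuming an
 * AVX512ER target, _mm512_rsqrt28_ps approximates 1/sqrt(x) per element with
 * a relative error of at most 2^-28, which is often accurate enough to skip a
 * Newton-Raphson refinement step in single precision. The helper name below
 * is hypothetical.
 *
 *   static inline __m512 approx_rsqrt(__m512 squared_len) {
 *     // ~1/sqrt(squared_len) for all 16 lanes, using the current rounding mode.
 *     return _mm512_rsqrt28_ps(squared_len);
 *   }
 */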
// rcp28
#define _mm512_rcp28_round_pd(A, R) __extension__ ({ \
  (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
                                       (__v8df)_mm512_setzero_pd(), \
                                       (__mmask8)-1, (int)(R)); })

#define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \
  (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
                                       (__v8df)(__m512d)(S), (__mmask8)(M), \
                                       (int)(R)); })

#define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \
  (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
                                       (__v8df)_mm512_setzero_pd(), \
                                       (__mmask8)(M), (int)(R)); })

#define _mm512_rcp28_pd(A) \
  _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_mask_rcp28_pd(S, M, A) \
  _mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_maskz_rcp28_pd(M, A) \
  _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_rcp28_round_ps(A, R) __extension__ ({ \
  (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
                                      (__v16sf)_mm512_setzero_ps(), \
                                      (__mmask16)-1, (int)(R)); })

#define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \
  (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
                                      (__v16sf)(__m512)(S), (__mmask16)(M), \
                                      (int)(R)); })

#define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \
  (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
                                      (__v16sf)_mm512_setzero_ps(), \
                                      (__mmask16)(M), (int)(R)); })

#define _mm512_rcp28_ps(A) \
  _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_mask_rcp28_ps(S, M, A) \
  _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm512_maskz_rcp28_ps(M, A) \
  _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)

#define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \
  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
                                            (__v4sf)(__m128)(B), \
                                            (__v4sf)_mm_setzero_ps(), \
                                            (__mmask8)-1, (int)(R)); })

#define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \
  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
                                            (__v4sf)(__m128)(B), \
                                            (__v4sf)(__m128)(S), \
                                            (__mmask8)(M), (int)(R)); })

#define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \
  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
                                            (__v4sf)(__m128)(B), \
                                            (__v4sf)_mm_setzero_ps(), \
                                            (__mmask8)(M), (int)(R)); })

#define _mm_rcp28_ss(A, B) \
  _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)

#define _mm_mask_rcp28_ss(S, M, A, B) \
  _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)

#define _mm_maskz_rcp28_ss(M, A, B) \
  _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)

#define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \
  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
                                             (__v2df)(__m128d)(B), \
                                             (__v2df)_mm_setzero_pd(), \
                                             (__mmask8)-1, (int)(R)); })

#define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \
  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
                                             (__v2df)(__m128d)(B), \
                                             (__v2df)(__m128d)(S), \
                                             (__mmask8)(M), (int)(R)); })

#define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \
  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
                                             (__v2df)(__m128d)(B), \
                                             (__v2df)_mm_setzero_pd(), \
                                             (__mmask8)(M), (int)(R)); })

#define _mm_rcp28_sd(A, B) \
  _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)

#define _mm_mask_rcp28_sd(S, M, A, B) \
  _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)

#define _mm_maskz_rcp28_sd(M, A, B) \
  _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
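
/*
 * Usage sketch (illustrative, not part of the original header): assuming an
 * AVX512ER target, _mm512_rcp28_ps approximates 1/x per element with a
 * relative error of at most 2^-28, so multiplying by it can stand in for a
 * division when that accuracy is acceptable. The helper name below is
 * hypothetical.
 *
 *   static inline __m512 approx_div(__m512 num, __m512 den) {
 *     // num * (~1/den): an approximate division across all 16 lanes.
 *     return _mm512_mul_ps(num, _mm512_rcp28_ps(den));
 *   }
 */
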
#endif // __AVX512ERINTRIN_H