/* Copyright (C) 2002-2019 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set.  */
  _MM_HINT_ET0 = 7,
  _MM_HINT_ET1 = 6,
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_prefetch (const void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, (__I & 0x4) >> 2, __I & 0x3);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), ((I & 0x4) >> 2), (I & 0x3))
#endif
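
/* Usage sketch (illustrative only; data, n and process are
   hypothetical):

     for (size_t i = 0; i < n; i++)
       {
         _mm_prefetch ((const char *) &data[i + 16], _MM_HINT_T0);
         process (data[i]);
       }

   Prefetching a fixed distance ahead of the access stream can hide
   memory latency; the best distance is workload-dependent.  */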

#ifndef __SSE__
#pragma GCC push_options
#pragma GCC target("sse")
#define __DISABLE_SSE__
#endif /* __SSE__ */

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Unaligned version of the same type.  */
typedef float __m128_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
  (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
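
/* For example, _MM_SHUFFLE (3, 2, 1, 0) yields 0xE4, the identity
   selector, and _MM_SHUFFLE (0, 1, 2, 3) yields 0x1B, which reverses
   the four elements.  */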

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020
#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000
#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000
#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create an undefined vector.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_undefined_ps (void)
{
  __m128 __Y = __Y;
  return __Y;
}

/* Create a vector of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A + (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A - (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A * (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A / (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
	(__v4sf) __builtin_ia32_cmpltss ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
	(__v4sf) __builtin_ia32_cmpless ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
	(__v4sf) __builtin_ia32_cmpnltss ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
	(__v4sf) __builtin_ia32_cmpnless ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
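
/* The all-ones/all-zeros lane masks produced by these comparisons
   combine naturally with the bit-wise operations above to select
   between two vectors without branching.  A sketch (a and b are
   illustrative __m128 values):

     __m128 mask = _mm_cmplt_ps (a, b);
     __m128 r = _mm_or_ps (_mm_and_ps (mask, a),
                           _mm_andnot_ps (mask, b));

   Each lane of r is taken from a where a < b and from b elsewhere,
   i.e. an element-wise minimum (ignoring NaN subtleties).  */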

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}
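
/* A sketch of the scalar comparisons (values are illustrative):

     __m128 x = _mm_set_ss (1.0f), y = _mm_set_ss (2.0f);
     if (_mm_comilt_ss (x, y))
       ;  // taken: 1.0f < 2.0f

   The _mm_ucomi* variants differ only in that they do not raise an
   invalid-operation exception when an operand is a quiet NaN.  */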

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}
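
/* The cvt forms round according to the current MXCSR rounding mode
   (round-to-nearest-even by default), while the cvtt forms always
   truncate toward zero.  With default rounding:

     _mm_cvtss_si32 (_mm_set_ss (2.75f));   // 3
     _mm_cvttss_si32 (_mm_set_ss (2.75f));  // 2
     _mm_cvtss_si32 (_mm_set_ss (2.5f));    // 2 (ties go to even)
   */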

/* Convert B to a SPFP value and insert it as element zero in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \
                                   (__v4sf)(__m128)(B), (int)(MASK)))
#endif
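
/* Usage sketch, with the _MM_SHUFFLE macro defined above (a and b are
   illustrative):

     __m128 r = _mm_shuffle_ps (a, b, _MM_SHUFFLE (1, 0, 3, 2));

   The two low elements of r are selected from a (here elements 2 and 3)
   and the two high elements from b (here elements 0 and 1).  */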

/* Selects and interleaves the upper two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}
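
/* A common use of the mask is a quick "any lane / all lanes" test on a
   comparison result.  Sketch (a and b are illustrative):

     int m = _mm_movemask_ps (_mm_cmplt_ps (a, b));
     if (m != 0)    // some lane has a < b
       ;
     if (m == 0xf)  // all four lanes have a < b
       ;
   */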

/* Return the contents of the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
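
/* For example, to switch to truncation and enable flush-to-zero (a
   sketch; these settings affect all subsequent SSE arithmetic in the
   calling thread):

     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON);
   */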

/* Create a vector with element 0 as F and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return *(__m128 *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return *(__m128_u *)__P;
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}
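
/* Note the argument order: _mm_set_ps lists elements from the highest
   lane down, _mm_setr_ps from the lowest lane up.  Both calls below
   build the vector whose lanes 0..3 hold 1, 2, 3, 4:

     _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
     _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f);
   */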

/* Stores the lower SPFP value.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = ((__v4sf)__A)[0];
}

extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return ((__v4sf)__A)[0];
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__m128 *)__P = __A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  *(__m128_u *)__P = __A;
}
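
/* _mm_load_ps and _mm_store_ps require a 16-byte-aligned address and
   may fault otherwise; the u-suffixed forms accept any address.  A
   sketch, where out is a hypothetical (possibly unaligned) float
   pointer:

     float buf[4] __attribute__ ((aligned (16))) = { 1, 2, 3, 4 };
     __m128 v = _mm_load_ps (buf);
     _mm_storeu_ps (out, v);
   */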

/* Store the lower SPFP value across four words.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_shuffle ((__v4sf)__A, (__v4sf)__B,
	__extension__ (__attribute__((__vector_size__ (16))) int) {4,1,2,3});
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \
                                        (int)(D), (int)(N)))

#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}
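
/* A sketch of _mm_sad_pu8, e.g. as the per-block error term in motion
   estimation (blk_a and blk_b are hypothetical pointers to 8-byte
   pixel blocks; _mm_cvtsi64_si32 from <mmintrin.h> extracts the low
   32 bits, whose low word holds the sum):

     __m64 sad = _mm_sad_pu8 (*(const __m64 *) blk_a,
                              *(const __m64 *) blk_b);
     int err = _mm_cvtsi64_si32 (sad);
   */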

/* Stores the data in A to the address P without polluting the caches.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \
  (row0) = __builtin_ia32_movlhps (__t0, __t1); \
  (row1) = __builtin_ia32_movhlps (__t1, __t0); \
  (row2) = __builtin_ia32_movlhps (__t2, __t3); \
  (row3) = __builtin_ia32_movhlps (__t3, __t2); \
} while (0)
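
/* Usage sketch (r0..r3 are illustrative __m128 rows of a 4x4 float
   matrix, row-major):

     _MM_TRANSPOSE4_PS (r0, r1, r2, r3);

   Afterwards row i holds what was column i of the original matrix.  */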

/* For backward source compatibility.  */
# include <emmintrin.h>

#ifdef __DISABLE_SSE__
#undef __DISABLE_SSE__
#pragma GCC pop_options
#endif /* __DISABLE_SSE__ */

/* The execution of the next instruction is delayed by an implementation-
   specific amount of time.  The instruction does not modify the
   architectural state.  This is after the pop_options pragma because
   it does not require SSE support in the processor--the encoding is a
   nop on processors that do not support it.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_pause (void)
{
  __builtin_ia32_pause ();
}

#endif /* _XMMINTRIN_H_INCLUDED */