File emmintr.patch of Package gcc41

2006-01-27  H.J. Lu  <hongjiu.lu@intel.com>
 
	* config/i386/emmintrin.h (_mm_cvtsd_f64): Add missing Intel
	intrinsic.
	(_mm_cvtsi128_si64): Likewise.
	(_mm_cvtsd_si64): Likewise.
	(_mm_cvttsd_si64): Likewise.
	(_mm_cvtsi64_sd): Likewise.
	(_mm_cvtsi64_si128): Likewise.
	* config/i386/mmintrin.h (_m_from_int64): Likewise.
	(_mm_cvtsi64_m64): Likewise.
	(_m_to_int64): Likewise.
	(_mm_cvtm64_si64): Likewise.
	* config/i386/xmmintrin.h (_mm_cvtss_si64): Likewise.
	(_mm_cvttss_si64): Likewise.
	(_mm_cvtsi64_ss): Likewise.
	(_mm_cvtss_f32): Likewise.


Index: gcc/config/i386/mmintrin.h
===================================================================
--- gcc/config/i386/mmintrin.h.orig	2005-10-28 16:17:14.000000000 +0200
+++ gcc/config/i386/mmintrin.h	2009-11-20 13:42:14.000000000 +0100
@@ -25,7 +25,7 @@
    Public License.  */
 
 /* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 8.0.  */
+   User Guide and Reference, version 9.0.  */
 
 #ifndef _MMINTRIN_H_INCLUDED
 #define _MMINTRIN_H_INCLUDED
@@ -69,13 +69,27 @@ _m_from_int (int __i)
 
 #ifdef __x86_64__
 /* Convert I to a __m64 object.  */
+
+/* Intel intrinsic.  */
+static __inline __m64  __attribute__((__always_inline__))
+_m_from_int64 (long long __i)
+{
+  return (__m64) __i;
+}
+
+static __inline __m64  __attribute__((__always_inline__))
+_mm_cvtsi64_m64 (long long __i)
+{
+  return (__m64) __i;
+}
+
+/* Microsoft intrinsic.  */
 static __inline __m64  __attribute__((__always_inline__))
 _mm_cvtsi64x_si64 (long long __i)
 {
   return (__m64) __i;
 }
 
-/* Convert I to a __m64 object.  */
 static __inline __m64  __attribute__((__always_inline__))
 _mm_set_pi64x (long long __i)
 {
@@ -97,7 +111,22 @@ _m_to_int (__m64 __i)
 }
 
 #ifdef __x86_64__
-/* Convert the lower 32 bits of the __m64 object into an integer.  */
+/* Convert the __m64 object to a 64bit integer.  */
+
+/* Intel intrinsic.  */
+static __inline long long __attribute__((__always_inline__))
+_m_to_int64 (__m64 __i)
+{
+  return (long long)__i;
+}
+
+static __inline long long __attribute__((__always_inline__))
+_mm_cvtm64_si64 (__m64 __i)
+{
+  return (long long)__i;
+}
+
+/* Microsoft intrinsic.  */
 static __inline long long __attribute__((__always_inline__))
 _mm_cvtsi64_si64x (__m64 __i)
 {
Index: gcc/config/i386/xmmintrin.h
===================================================================
--- gcc/config/i386/xmmintrin.h.orig	2005-10-28 16:17:15.000000000 +0200
+++ gcc/config/i386/xmmintrin.h	2009-11-20 13:42:14.000000000 +0100
@@ -25,7 +25,7 @@
    Public License.  */
 
 /* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 8.0.  */
+   User Guide and Reference, version 9.0.  */
 
 #ifndef _XMMINTRIN_H_INCLUDED
 #define _XMMINTRIN_H_INCLUDED
@@ -491,8 +491,17 @@ _mm_cvt_ss2si (__m128 __A)
 }
 
 #ifdef __x86_64__
-/* Convert the lower SPFP value to a 32-bit integer according to the current
-   rounding mode.  */
+/* Convert the lower SPFP value to a 32-bit integer according to the
+   current rounding mode.  */
+
+/* Intel intrinsic.  */
+static __inline long long __attribute__((__always_inline__))
+_mm_cvtss_si64 (__m128 __A)
+{
+  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
+}
+
+/* Microsoft intrinsic.  */
 static __inline long long __attribute__((__always_inline__))
 _mm_cvtss_si64x (__m128 __A)
 {
@@ -529,6 +538,15 @@ _mm_cvtt_ss2si (__m128 __A)
 
 #ifdef __x86_64__
 /* Truncate the lower SPFP value to a 32-bit integer.  */
+
+/* Intel intrinsic.  */
+static __inline long long __attribute__((__always_inline__))
+_mm_cvttss_si64 (__m128 __A)
+{
+  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
+}
+
+/* Microsoft intrinsic.  */
 static __inline long long __attribute__((__always_inline__))
 _mm_cvttss_si64x (__m128 __A)
 {
@@ -565,6 +583,15 @@ _mm_cvt_si2ss (__m128 __A, int __B)
 
 #ifdef __x86_64__
 /* Convert B to a SPFP value and insert it as element zero in A.  */
+
+/* Intel intrinsic.  */
+static __inline __m128 __attribute__((__always_inline__))
+_mm_cvtsi64_ss (__m128 __A, long long __B)
+{
+  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
+}
+
+/* Microsoft intrinsic.  */
 static __inline __m128 __attribute__((__always_inline__))
 _mm_cvtsi64x_ss (__m128 __A, long long __B)
 {
@@ -911,6 +938,12 @@ _mm_store_ss (float *__P, __m128 __A)
   *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
 }
 
+static __inline float __attribute__((__always_inline__))
+_mm_cvtss_f32 (__m128 __A)
+{
+  return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
+}
+
 /* Store four SPFP values.  The address must be 16-byte aligned.  */
 static __inline void __attribute__((__always_inline__))
 _mm_store_ps (float *__P, __m128 __A)
Index: gcc/config/i386/emmintrin.h
===================================================================
--- gcc/config/i386/emmintrin.h.orig	2005-10-28 16:17:15.000000000 +0200
+++ gcc/config/i386/emmintrin.h	2009-11-20 13:42:14.000000000 +0100
@@ -25,7 +25,7 @@
    Public License.  */
 
 /* Implemented from the specification included in the Intel C++ Compiler
-   User Guide and Reference, version 8.0.  */
+   User Guide and Reference, version 9.0.  */
 
 #ifndef _EMMINTRIN_H_INCLUDED
 #define _EMMINTRIN_H_INCLUDED
@@ -158,6 +158,12 @@ _mm_store_sd (double *__P, __m128d __A)
   *__P = __builtin_ia32_vec_ext_v2df (__A, 0);
 }
 
+static __inline double __attribute__((__always_inline__))
+_mm_cvtsd_f64 (__m128d __A)
+{
+  return __builtin_ia32_vec_ext_v2df (__A, 0);
+}
+
 static __inline void __attribute__((__always_inline__))
 _mm_storel_pd (double *__P, __m128d __A)
 {
@@ -199,6 +205,14 @@ _mm_cvtsi128_si32 (__m128i __A)
 }
 
 #ifdef __x86_64__
+/* Intel intrinsic.  */
+static __inline long long __attribute__((__always_inline__))
+_mm_cvtsi128_si64 (__m128i __A)
+{
+  return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0);
+}
+
+/* Microsoft intrinsic.  */
 static __inline long long __attribute__((__always_inline__))
 _mm_cvtsi128_si64x (__m128i __A)
 {
@@ -789,6 +803,14 @@ _mm_cvtsd_si32 (__m128d __A)
 }
 
 #ifdef __x86_64__
+/* Intel intrinsic.  */
+static __inline long long __attribute__((__always_inline__))
+_mm_cvtsd_si64 (__m128d __A)
+{
+  return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
+}
+
+/* Microsoft intrinsic.  */
 static __inline long long __attribute__((__always_inline__))
 _mm_cvtsd_si64x (__m128d __A)
 {
@@ -803,6 +825,14 @@ _mm_cvttsd_si32 (__m128d __A)
 }
 
 #ifdef __x86_64__
+/* Intel intrinsic.  */
+static __inline long long __attribute__((__always_inline__))
+_mm_cvttsd_si64 (__m128d __A)
+{
+  return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
+}
+
+/* Microsoft intrinsic.  */
 static __inline long long __attribute__((__always_inline__))
 _mm_cvttsd_si64x (__m128d __A)
 {
@@ -823,6 +853,14 @@ _mm_cvtsi32_sd (__m128d __A, int __B)
 }
 
 #ifdef __x86_64__
+/* Intel intrinsic.  */
+static __inline __m128d __attribute__((__always_inline__))
+_mm_cvtsi64_sd (__m128d __A, long long __B)
+{
+  return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
+}
+
+/* Microsoft intrinsic.  */
 static __inline __m128d __attribute__((__always_inline__))
 _mm_cvtsi64x_sd (__m128d __A, long long __B)
 {
@@ -1379,6 +1417,14 @@ _mm_cvtsi32_si128 (int __A)
 }
 
 #ifdef __x86_64__
+/* Intel intrinsic.  */
+static __inline __m128i __attribute__((__always_inline__))
+_mm_cvtsi64_si128 (long long __A)
+{
+  return _mm_set_epi64x (0, __A);
+}
+
+/* Microsoft intrinsic.  */
 static __inline __m128i __attribute__((__always_inline__))
 _mm_cvtsi64x_si128 (long long __A)
 {
openSUSE Build Service is sponsored by