From 5ba5d62323c8a7d60f89ecbaa0722732f1ba4c8b Mon Sep 17 00:00:00 2001
From: Andrew Hsieh
Date: Fri, 9 Nov 2012 21:00:58 -0800
Subject: New host gcc prebuilt for MacOSX

Built from toolchain repo gcc.git/gcc-4.2.1-5666.3 (originally from
http://opensource.apple.com/tarballs/gcc/gcc-5666.3.tar.gz)

Run the following in the source directory twice, once on a MacOSX 10.6
system and once on a MacOSX 10.7 system:

mkdir -p build/obj build/dst build/sym
gnumake install RC_OS=macos RC_ARCHS='i386 x86_64' \
  TARGETS='i386 x86_64' SRCROOT=`pwd` OBJROOT=`pwd`/build/obj \
  DSTROOT=`pwd`/build/dst SYMROOT=`pwd`/build/sym

Then merge both build/dst/usr trees, remove unneeded files, and add the
*-gcc/g++/cpp symlinks.

Change-Id: I946a95a4cd6aace31c4f1c6ffa76eada06d6e161
---
 bin/i686-apple-darwin10-cpp | 1 +
 bin/i686-apple-darwin10-cpp-4.2.1 | Bin 0 -> 556328 bytes
 bin/i686-apple-darwin10-g++ | 1 +
 bin/i686-apple-darwin10-g++-4.2.1 | Bin 0 -> 556328 bytes
 bin/i686-apple-darwin10-gcc | 1 +
 bin/i686-apple-darwin10-gcc-4.2.1 | Bin 0 -> 548136 bytes
 bin/i686-apple-darwin11-cpp | 1 +
 bin/i686-apple-darwin11-cpp-4.2.1 | Bin 0 -> 556328 bytes
 bin/i686-apple-darwin11-g++ | 1 +
 bin/i686-apple-darwin11-g++-4.2.1 | Bin 0 -> 556328 bytes
 bin/i686-apple-darwin11-gcc | 1 +
 bin/i686-apple-darwin11-gcc-4.2.1 | Bin 0 -> 548136 bytes
 include/gcc/darwin/4.2/float.h | 9 +
 include/gcc/darwin/4.2/ppc_intrinsics.h | 1026 ++++++++++
 include/gcc/darwin/4.2/stdarg.h | 6 +
 include/gcc/darwin/4.2/stdint.h | 253 +++
 include/gcc/darwin/4.2/varargs.h | 6 +
 lib/gcc/i686-apple-darwin10/4.2.1/crt3.o | Bin 0 -> 8912 bytes
 lib/gcc/i686-apple-darwin10/4.2.1/include/README | 14 +
 .../i686-apple-darwin10/4.2.1/include/ammintrin.h | 106 ++
 .../i686-apple-darwin10/4.2.1/include/decfloat.h | 108 ++
 .../i686-apple-darwin10/4.2.1/include/emmintrin.h | 1981 ++++++++++++++++++++
 lib/gcc/i686-apple-darwin10/4.2.1/include/float.h | 164 ++
 lib/gcc/i686-apple-darwin10/4.2.1/include/iso646.h | 48 +
 lib/gcc/i686-apple-darwin10/4.2.1/include/limits.h | 118 ++
 .../i686-apple-darwin10/4.2.1/include/mm3dnow.h | 220 +++
 .../i686-apple-darwin10/4.2.1/include/mm_malloc.h | 77 +
 .../i686-apple-darwin10/4.2.1/include/mmintrin.h | 1219 ++++++++++++
 .../i686-apple-darwin10/4.2.1/include/nmmintrin.h | 41 +
 lib/gcc/i686-apple-darwin10/4.2.1/include/omp.h | 87 +
 .../i686-apple-darwin10/4.2.1/include/pmmintrin.h | 172 ++
 .../4.2.1/include/ppc_intrinsics.h | 1 +
 .../i686-apple-darwin10/4.2.1/include/smmintrin.h | 836 +++++++++
 lib/gcc/i686-apple-darwin10/4.2.1/include/stdarg.h | 133 ++
 .../i686-apple-darwin10/4.2.1/include/stdbool.h | 53 +
 lib/gcc/i686-apple-darwin10/4.2.1/include/stddef.h | 419 +++++
 lib/gcc/i686-apple-darwin10/4.2.1/include/stdint.h | 1 +
 .../i686-apple-darwin10/4.2.1/include/syslimits.h | 8 +
 lib/gcc/i686-apple-darwin10/4.2.1/include/tgmath.h | 182 ++
 .../i686-apple-darwin10/4.2.1/include/tmmintrin.h | 304 +++
 lib/gcc/i686-apple-darwin10/4.2.1/include/unwind.h | 241 +++
 .../i686-apple-darwin10/4.2.1/include/varargs.h | 7 +
 .../i686-apple-darwin10/4.2.1/include/xmmintrin.h | 1582 ++++++++++++++++
 lib/gcc/i686-apple-darwin10/4.2.1/libcc_kext.a | Bin 0 -> 331808 bytes
 lib/gcc/i686-apple-darwin10/4.2.1/libgcc.a | Bin 0 -> 246096 bytes
 lib/gcc/i686-apple-darwin10/4.2.1/libgcc_eh.a | Bin 0 -> 96256 bytes
 lib/gcc/i686-apple-darwin10/4.2.1/libgcc_static.a | Bin 0 -> 237360 bytes
 lib/gcc/i686-apple-darwin10/4.2.1/libgcov.a | Bin 0 -> 75376 bytes
 lib/gcc/i686-apple-darwin10/4.2.1/libgomp.a | Bin 0 -> 159664 bytes
 lib/gcc/i686-apple-darwin10/4.2.1/libgomp.spec | 3 +
 lib/gcc/i686-apple-darwin10/4.2.1/libstdc++.dylib
| Bin 0 -> 848272 bytes lib/gcc/i686-apple-darwin10/4.2.1/x86_64/crt3.o | Bin 0 -> 11320 bytes lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcc.a | Bin 0 -> 120088 bytes .../i686-apple-darwin10/4.2.1/x86_64/libgcc_eh.a | Bin 0 -> 37152 bytes lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcov.a | Bin 0 -> 25000 bytes lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgomp.a | Bin 0 -> 66448 bytes .../i686-apple-darwin10/4.2.1/x86_64/libgomp.spec | 3 + lib/gcc/i686-apple-darwin11/4.2.1/crt3.o | Bin 0 -> 8912 bytes lib/gcc/i686-apple-darwin11/4.2.1/include/README | 14 + .../i686-apple-darwin11/4.2.1/include/ammintrin.h | 106 ++ .../i686-apple-darwin11/4.2.1/include/decfloat.h | 108 ++ .../i686-apple-darwin11/4.2.1/include/emmintrin.h | 1981 ++++++++++++++++++++ lib/gcc/i686-apple-darwin11/4.2.1/include/fenv.h | 18 + lib/gcc/i686-apple-darwin11/4.2.1/include/float.h | 164 ++ lib/gcc/i686-apple-darwin11/4.2.1/include/iso646.h | 48 + lib/gcc/i686-apple-darwin11/4.2.1/include/limits.h | 118 ++ .../i686-apple-darwin11/4.2.1/include/mm3dnow.h | 220 +++ .../i686-apple-darwin11/4.2.1/include/mm_malloc.h | 77 + .../i686-apple-darwin11/4.2.1/include/mmintrin.h | 1219 ++++++++++++ .../i686-apple-darwin11/4.2.1/include/nmmintrin.h | 41 + lib/gcc/i686-apple-darwin11/4.2.1/include/omp.h | 87 + .../i686-apple-darwin11/4.2.1/include/pmmintrin.h | 172 ++ .../4.2.1/include/ppc_intrinsics.h | 1 + .../i686-apple-darwin11/4.2.1/include/smmintrin.h | 836 +++++++++ lib/gcc/i686-apple-darwin11/4.2.1/include/stdarg.h | 133 ++ .../i686-apple-darwin11/4.2.1/include/stdbool.h | 53 + lib/gcc/i686-apple-darwin11/4.2.1/include/stddef.h | 419 +++++ .../i686-apple-darwin11/4.2.1/include/syslimits.h | 8 + lib/gcc/i686-apple-darwin11/4.2.1/include/tgmath.h | 182 ++ .../i686-apple-darwin11/4.2.1/include/tmmintrin.h | 304 +++ lib/gcc/i686-apple-darwin11/4.2.1/include/unwind.h | 241 +++ .../i686-apple-darwin11/4.2.1/include/varargs.h | 7 + .../i686-apple-darwin11/4.2.1/include/xmmintrin.h | 1582 ++++++++++++++++ lib/gcc/i686-apple-darwin11/4.2.1/libcc_kext.a | Bin 0 -> 181240 bytes lib/gcc/i686-apple-darwin11/4.2.1/libgcc.a | Bin 0 -> 90072 bytes lib/gcc/i686-apple-darwin11/4.2.1/libgcc_eh.a | Bin 0 -> 32672 bytes lib/gcc/i686-apple-darwin11/4.2.1/libgcc_static.a | Bin 0 -> 88296 bytes lib/gcc/i686-apple-darwin11/4.2.1/libgcov.a | Bin 0 -> 16608 bytes lib/gcc/i686-apple-darwin11/4.2.1/libgomp.a | Bin 0 -> 42136 bytes lib/gcc/i686-apple-darwin11/4.2.1/libgomp.spec | 3 + lib/gcc/i686-apple-darwin11/4.2.1/libstdc++.dylib | Bin 0 -> 619948 bytes lib/gcc/i686-apple-darwin11/4.2.1/x86_64/crt3.o | Bin 0 -> 11320 bytes lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcc.a | Bin 0 -> 120088 bytes .../i686-apple-darwin11/4.2.1/x86_64/libgcc_eh.a | Bin 0 -> 37152 bytes lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcov.a | Bin 0 -> 25000 bytes lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgomp.a | Bin 0 -> 66448 bytes .../i686-apple-darwin11/4.2.1/x86_64/libgomp.spec | 3 + libexec/gcc/i686-apple-darwin10/4.2.1/as | 1 + libexec/gcc/i686-apple-darwin10/4.2.1/cc1 | Bin 0 -> 12474696 bytes libexec/gcc/i686-apple-darwin10/4.2.1/cc1obj | Bin 0 -> 12775896 bytes libexec/gcc/i686-apple-darwin10/4.2.1/cc1objplus | Bin 0 -> 14066448 bytes libexec/gcc/i686-apple-darwin10/4.2.1/cc1plus | Bin 0 -> 13748656 bytes libexec/gcc/i686-apple-darwin10/4.2.1/collect2 | Bin 0 -> 279208 bytes libexec/gcc/i686-apple-darwin10/4.2.1/ld | 1 + libexec/gcc/i686-apple-darwin11/4.2.1/as | 1 + libexec/gcc/i686-apple-darwin11/4.2.1/cc1 | Bin 0 -> 12474696 bytes 
libexec/gcc/i686-apple-darwin11/4.2.1/cc1obj | Bin 0 -> 12775896 bytes libexec/gcc/i686-apple-darwin11/4.2.1/cc1objplus | Bin 0 -> 14066448 bytes libexec/gcc/i686-apple-darwin11/4.2.1/cc1plus | Bin 0 -> 13748656 bytes libexec/gcc/i686-apple-darwin11/4.2.1/collect2 | Bin 0 -> 279208 bytes libexec/gcc/i686-apple-darwin11/4.2.1/ld | 1 + 111 files changed, 17583 insertions(+) create mode 120000 bin/i686-apple-darwin10-cpp create mode 100755 bin/i686-apple-darwin10-cpp-4.2.1 create mode 120000 bin/i686-apple-darwin10-g++ create mode 100755 bin/i686-apple-darwin10-g++-4.2.1 create mode 120000 bin/i686-apple-darwin10-gcc create mode 100755 bin/i686-apple-darwin10-gcc-4.2.1 create mode 120000 bin/i686-apple-darwin11-cpp create mode 100755 bin/i686-apple-darwin11-cpp-4.2.1 create mode 120000 bin/i686-apple-darwin11-g++ create mode 100755 bin/i686-apple-darwin11-g++-4.2.1 create mode 120000 bin/i686-apple-darwin11-gcc create mode 100755 bin/i686-apple-darwin11-gcc-4.2.1 create mode 100644 include/gcc/darwin/4.2/float.h create mode 100644 include/gcc/darwin/4.2/ppc_intrinsics.h create mode 100644 include/gcc/darwin/4.2/stdarg.h create mode 100644 include/gcc/darwin/4.2/stdint.h create mode 100644 include/gcc/darwin/4.2/varargs.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/crt3.o create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/README create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/ammintrin.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/decfloat.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/emmintrin.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/float.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/iso646.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/limits.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/mm3dnow.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/mm_malloc.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/mmintrin.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/nmmintrin.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/omp.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/pmmintrin.h create mode 120000 lib/gcc/i686-apple-darwin10/4.2.1/include/ppc_intrinsics.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/smmintrin.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/stdarg.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/stdbool.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/stddef.h create mode 120000 lib/gcc/i686-apple-darwin10/4.2.1/include/stdint.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/syslimits.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/tgmath.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/tmmintrin.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/unwind.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/varargs.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/include/xmmintrin.h create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/libcc_kext.a create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/libgcc.a create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/libgcc_eh.a create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/libgcc_static.a create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/libgcov.a create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/libgomp.a create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/libgomp.spec create mode 
100755 lib/gcc/i686-apple-darwin10/4.2.1/libstdc++.dylib create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/x86_64/crt3.o create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcc.a create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcc_eh.a create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcov.a create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgomp.a create mode 100644 lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgomp.spec create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/crt3.o create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/README create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/ammintrin.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/decfloat.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/emmintrin.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/fenv.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/float.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/iso646.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/limits.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/mm3dnow.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/mm_malloc.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/mmintrin.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/nmmintrin.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/omp.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/pmmintrin.h create mode 120000 lib/gcc/i686-apple-darwin11/4.2.1/include/ppc_intrinsics.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/smmintrin.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/stdarg.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/stdbool.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/stddef.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/syslimits.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/tgmath.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/tmmintrin.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/unwind.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/varargs.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/include/xmmintrin.h create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/libcc_kext.a create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/libgcc.a create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/libgcc_eh.a create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/libgcc_static.a create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/libgcov.a create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/libgomp.a create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/libgomp.spec create mode 100755 lib/gcc/i686-apple-darwin11/4.2.1/libstdc++.dylib create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/x86_64/crt3.o create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcc.a create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcc_eh.a create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcov.a create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgomp.a create mode 100644 lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgomp.spec create mode 120000 libexec/gcc/i686-apple-darwin10/4.2.1/as create mode 100755 libexec/gcc/i686-apple-darwin10/4.2.1/cc1 create mode 100755 libexec/gcc/i686-apple-darwin10/4.2.1/cc1obj create mode 100755 libexec/gcc/i686-apple-darwin10/4.2.1/cc1objplus create mode 100755 
libexec/gcc/i686-apple-darwin10/4.2.1/cc1plus create mode 100755 libexec/gcc/i686-apple-darwin10/4.2.1/collect2 create mode 120000 libexec/gcc/i686-apple-darwin10/4.2.1/ld create mode 120000 libexec/gcc/i686-apple-darwin11/4.2.1/as create mode 100755 libexec/gcc/i686-apple-darwin11/4.2.1/cc1 create mode 100755 libexec/gcc/i686-apple-darwin11/4.2.1/cc1obj create mode 100755 libexec/gcc/i686-apple-darwin11/4.2.1/cc1objplus create mode 100755 libexec/gcc/i686-apple-darwin11/4.2.1/cc1plus create mode 100755 libexec/gcc/i686-apple-darwin11/4.2.1/collect2 create mode 120000 libexec/gcc/i686-apple-darwin11/4.2.1/ld diff --git a/bin/i686-apple-darwin10-cpp b/bin/i686-apple-darwin10-cpp new file mode 120000 index 0000000..b3c51d0 --- /dev/null +++ b/bin/i686-apple-darwin10-cpp @@ -0,0 +1 @@ +i686-apple-darwin10-cpp-4.2.1 \ No newline at end of file diff --git a/bin/i686-apple-darwin10-cpp-4.2.1 b/bin/i686-apple-darwin10-cpp-4.2.1 new file mode 100755 index 0000000..4e0f3bd Binary files /dev/null and b/bin/i686-apple-darwin10-cpp-4.2.1 differ diff --git a/bin/i686-apple-darwin10-g++ b/bin/i686-apple-darwin10-g++ new file mode 120000 index 0000000..1707d9b --- /dev/null +++ b/bin/i686-apple-darwin10-g++ @@ -0,0 +1 @@ +i686-apple-darwin10-g++-4.2.1 \ No newline at end of file diff --git a/bin/i686-apple-darwin10-g++-4.2.1 b/bin/i686-apple-darwin10-g++-4.2.1 new file mode 100755 index 0000000..f902c89 Binary files /dev/null and b/bin/i686-apple-darwin10-g++-4.2.1 differ diff --git a/bin/i686-apple-darwin10-gcc b/bin/i686-apple-darwin10-gcc new file mode 120000 index 0000000..a938622 --- /dev/null +++ b/bin/i686-apple-darwin10-gcc @@ -0,0 +1 @@ +i686-apple-darwin10-gcc-4.2.1 \ No newline at end of file diff --git a/bin/i686-apple-darwin10-gcc-4.2.1 b/bin/i686-apple-darwin10-gcc-4.2.1 new file mode 100755 index 0000000..8277003 Binary files /dev/null and b/bin/i686-apple-darwin10-gcc-4.2.1 differ diff --git a/bin/i686-apple-darwin11-cpp b/bin/i686-apple-darwin11-cpp new file mode 120000 index 0000000..ae718e1 --- /dev/null +++ b/bin/i686-apple-darwin11-cpp @@ -0,0 +1 @@ +i686-apple-darwin11-cpp-4.2.1 \ No newline at end of file diff --git a/bin/i686-apple-darwin11-cpp-4.2.1 b/bin/i686-apple-darwin11-cpp-4.2.1 new file mode 100755 index 0000000..a9d6391 Binary files /dev/null and b/bin/i686-apple-darwin11-cpp-4.2.1 differ diff --git a/bin/i686-apple-darwin11-g++ b/bin/i686-apple-darwin11-g++ new file mode 120000 index 0000000..23da3d0 --- /dev/null +++ b/bin/i686-apple-darwin11-g++ @@ -0,0 +1 @@ +i686-apple-darwin11-g++-4.2.1 \ No newline at end of file diff --git a/bin/i686-apple-darwin11-g++-4.2.1 b/bin/i686-apple-darwin11-g++-4.2.1 new file mode 100755 index 0000000..fa10a85 Binary files /dev/null and b/bin/i686-apple-darwin11-g++-4.2.1 differ diff --git a/bin/i686-apple-darwin11-gcc b/bin/i686-apple-darwin11-gcc new file mode 120000 index 0000000..739f7a3 --- /dev/null +++ b/bin/i686-apple-darwin11-gcc @@ -0,0 +1 @@ +i686-apple-darwin11-gcc-4.2.1 \ No newline at end of file diff --git a/bin/i686-apple-darwin11-gcc-4.2.1 b/bin/i686-apple-darwin11-gcc-4.2.1 new file mode 100755 index 0000000..89a734a Binary files /dev/null and b/bin/i686-apple-darwin11-gcc-4.2.1 differ diff --git a/include/gcc/darwin/4.2/float.h b/include/gcc/darwin/4.2/float.h new file mode 100644 index 0000000..048737f --- /dev/null +++ b/include/gcc/darwin/4.2/float.h @@ -0,0 +1,9 @@ +/* This file is public domain. */ +/* This file exists soley to keep Metrowerks' compilers happy. 
The version + used by GCC 3.4 and later can be found in /usr/lib/gcc, although it's + not very informative. */ +#ifdef __MWERKS__ +#include "mw_float.h" +#else +#error "This header only supports __MWERKS__." +#endif diff --git a/include/gcc/darwin/4.2/ppc_intrinsics.h b/include/gcc/darwin/4.2/ppc_intrinsics.h new file mode 100644 index 0000000..8cfe01f --- /dev/null +++ b/include/gcc/darwin/4.2/ppc_intrinsics.h @@ -0,0 +1,1026 @@ +/* APPLE LOCAL file PPC_INTRINSICS */ + +/* Definitions for PowerPC intrinsic instructions + Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to the Free +Software Foundation, 59 Temple Place - Suite 330, Boston, MA +02111-1307, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * The following PowerPC intrinsics are provided by this header: + * + * Low-Level Processor Synchronization + * __eieio - Enforce In-Order Execution of I/O + * __isync - Instruction Synchronize + * __sync - Synchronize + * __lwsync - Lightweight Synchronize + * + * Manipulating the Contents of a Variable or Register + * __cntlzw - Count Leading Zeros Word + * __cntlzd - Count Leading Zeros Double Word + * __rlwimi - Rotate Left Word Immediate then Mask Insert + * __rlwinm - Rotate Left Word Immediate then AND with Mask + * __rlwnm - Rotate Left Word then AND with Mask + * + * Byte-Reversing Functions + * __lhbrx - Load Half Word Byte-Reverse Indexed + * __lwbrx - Load Word Byte-Reverse Indexed + * __sthbrx - Store Half Word Byte-Reverse Indexed + * __stwbrx - Store Word Byte-Reverse Indexed + * + * Data Cache Manipulation + * __dcba - Data Cache Block Allocate + * __dcbf - Data Cache Block Flush + * __dcbst - Data Cache Block Store + * __dcbt - Data Cache Block Touch + * __dcbtst - Data Cache Block Touch for Store + * __dcbzl - Data Cache Block Set to Zero + * __dcbz - Data Cache Block Set to Zero (32-bytes only) + * + * Setting the Floating-Point Environment + * __setflm - Set Floating-point Mode + * + * Math Functions + * __fabs - Floating-Point Absolute Value + * __fnabs - Floating Negative Absolute Value + * __fctiw - Floating Convert to Integer Word + * __fctiwz - Floating Convert to Integer Word with Round toward Zero + * __fctidz - Floating Convert to Integer Doubleword with Round toward Zero + * __fctid - Floating Convert to Integer Doubleword + * __fcfid - Floating Convert From Integer Doubleword + * __fmadd - Floating Multiply-Add (Double-Precision) + * __fmadds - Floating Multiply-Add Single + * __fmsub - Floating Multiply-Subract (Double-Precision) + * __fmsubs - Floating Multiply-Subract Single + * __fmul - Floating Multiply (Double-Precision) + * 
__fmuls - Floating Multiply Single + * __fnmadd - Floating Negative Multiply-Add (Double-Precision) + * __fnmadds - Floating Negative Multiply-Add Single + * __fnmsub - Floating Negative Multiply-Subtract (Double-Precision) + * __fnmsubs - Floating Negative Multiply-Subtract Single + * __fres - Floating Reciprocal Estimate + * __frsp - Floating Round to Single-Precision + * __frsqrte - Floating Reciprocal Square Root Estimate + * __frsqrtes - Floating Reciprocal Square Root Estimate Single + * __fsel - Floating Select + * __fsels - Floating Select (Single-Precision variant) + * __fsqrt - Floating-Point Square Root (Double-Precision) + * __fsqrts - Floating-Point Square Root Single-Precision + * __mulhw - Multiply High Word + * __mulhwu - Multiply High Word Unsigned + * __stfiwx - Store Floating-Point as Integer Word Indexed + * + * Miscellaneous Functions + * __nop - PPC preferred form of no operation + * __astrcmp - assembly strcmp + * __icbi - Instruction Cache Block Invalidate + * __mffs - Move from FPSCR + * __mfspr - Move from Special Purpose Register + * __mtfsf - Move to SPSCR Fields + * __mtspr - Move to Special Purpose Register + * __OSReadSwapSInt16 - lhbrx for signed shorts + * __OSReadSwapUInt16 - lhbrx for unsigned shorts + * + * TO DO: + * - Desired: + * mullw + * - Available in CodeWarrior, not yet implemented here: + * abs, labs, fabsf, fnabsf + * + * NOTES: + * - Some of the intrinsics need to be macros because certain + * parameters MUST be integer constants and not values in registers. + * - The declarations use __asm__ instead of asm and __inline__ instead + * of inline to prevent errors when -ansi is specified. + * - Some of the intrinsic definitions use the "volatile" specifier on + * the "asm" statements in order to work around what appears to be + * a bug in the compiler/optimizer. In general we have avoided the + * use of "volatile" because it suppresses optimization on the + * generated instructions. The instructions to which "volatile" + * has been added where it appears that it should not be needed are + * lhbrx and lwbrx. + * + * Contributors: Fred Forsman (editor), Turly O'Connor, Ian Ollmann, Sanjay Patel + * Last modified: October 6, 2004 + */ + +#ifndef _PPC_INTRINSICS_H_ +#define _PPC_INTRINSICS_H_ + +#if (defined(__ppc__) || defined(__ppc64__)) && ! defined(__MWERKS__) + +/******************************************************************* + * Special Purpose Registers (SPRs) * + *******************************************************************/ + +#define __SPR_MQR 0 /* PPC 601 only */ +#define __SPR_XER 1 +#define __SPR_RTCU 4 /* Real time clock upper. PPC 601 only.*/ +#define __SPR_RTCL 5 /* Real time clock lower. PPC 601 only.*/ +#define __SPR_LR 8 +#define __SPR_CTR 9 +#define __SPR_VRSAVE 256 /* AltiVec */ +#define __SPR_TBL 268 /* Time-base Lower. Not on PPC 601 */ +#define __SPR_TBU 269 /* Time-base Upper. Not on PPC 601 */ +#define __SPR_UMMCR2 928 /* PPC 74xx */ +#define __SPR_UPMC5 929 /* PPC 745x */ +#define __SPR_UPMC6 930 /* PPC 745x */ +#define __SPR_UBAMR 935 /* PPC 7400 and 7410 */ +#define __SPR_UMMCR0 936 /* PPC 74xx and 750 */ +#define __SPR_UPMC1 937 /* PPC 74xx and 750 */ +#define __SPR_UPMC2 938 /* PPC 74xx and 750 */ +#define __SPR_USIAR 939 /* PPC 74xx and 750 */ +#define __SPR_UMMCR1 940 /* PPC 74xx and 750 */ +#define __SPR_UPMC3 941 /* PPC 74xx and 750 */ +#define __SPR_UPMC4 942 /* PPC 74xx and 750 */ +#define __SPR_PIR 1023 /* supervisor level only! */ + +/* + * Shorthand macros for some commonly used SPR's. 
+ */ +#define __mfxer() __mfspr(__SPR_XER) +#define __mflr() __mfspr(__SPR_LR) +#define __mfctr() __mfspr(__SPR_CTR) +#define __mfvrsave() __mfspr(__SPR_VRSAVE) +#define __mftb() __mfspr(__SPR_TBL) +#define __mftbu() __mfspr(__SPR_TBU) + +#define __mtlr(value) __mtspr(__SPR_LR, value) +#define __mtxer(value) __mtspr(__SPR_XER, value) +#define __mtctr(value) __mtspr(__SPR_CTR, value) +#define __mtvrsave(value) __mtspr(__SPR_VRSAVE, value) + + +/******************************************************************* + * Low-Level Processor Synchronization * + *******************************************************************/ + +/* + * __eieio - Enforce In-Order Execution of I/O + * + * void __eieio (void); + */ +#define __eieio() __asm__ ("eieio" : : : "memory") + +/* + * __isync - Instruction Synchronize + * + * void __isync (void); + */ +#define __isync() \ + __asm__ volatile ("isync") + +/* + * __sync - Synchronize + * + * void __sync (void); + */ +#define __sync() __asm__ volatile ("sync") + +/* + * __lwsync - Lightweight Synchronize, see PPC2.01, Book 2 + * + * void __lwsync (void); + */ +#define __lwsync() __asm__ volatile ("sync 1") + + +/******************************************************************* + * Byte-Reversing Functions * + *******************************************************************/ + +/* + * __lhbrx - Load Half Word Byte-Reverse Indexed + * + * int __lhbrx(void *, int); + */ +#define __lhbrx(base, index) \ + ({ unsigned short __ppc_i_lhbrxResult; \ + __asm__ volatile ("lhbrx %0, %1, %2" : "=r" (__ppc_i_lhbrxResult) : "b%" (index), "r" (base) : "memory"); \ + /*return*/ __ppc_i_lhbrxResult; }) + +/* + * __lwbrx - Load Word Byte-Reverse Indexed + * + * int __lwbrx(void *, int); + */ +#define __lwbrx(base, index) \ + ({ unsigned int __ppc_i_lwbrxResult; \ + __asm__ volatile ("lwbrx %0, %1, %2" : "=r" (__ppc_i_lwbrxResult) : "b%" (index), "r" (base) : "memory"); \ + /*return*/ __ppc_i_lwbrxResult; }) + +/* + * __sthbrx - Store Half Word Byte-Reverse Indexed + * + * int __sthbrx(unsigned short, void *, int); + */ +#define __sthbrx(value, base, index) \ + __asm__ ("sthbrx %0, %1, %2" : : "r" (value), "b%" (index), "r" (base) : "memory") + +/* + * __stwbrx - Store Word Byte-Reverse Indexed + * + * int __sthbrx(unsigned int, void *, int); + */ +#define __stwbrx(value, base, index) \ + __asm__ ("stwbrx %0, %1, %2" : : "r" (value), "b%" (index), "r" (base) : "memory") + + +/******************************************************************* + * Manipulating the Contents of a Variable or Register * + *******************************************************************/ + +/* + * __cntlzw - Count Leading Zeros Word + * __cntlzd - Count Leading Zeros Double Word + */ + +#define __cntlzw(a) __builtin_clz(a) +#define __cntlzd(a) __builtin_clzll(a) + +/* + * __rlwimi - Rotate Left Word Immediate then Mask Insert + * + * int __rlwimi(int, long, int, int, int); + * + * We don't mention "%1" below: operand[1] needs to be skipped as + * it's just a placeholder to let the compiler know that rA is read + * from as well as written to. 
+ */ +#define __rlwimi(rA, rS, cnt, mb, me) \ + ({ __asm__ ("rlwimi %0,%2,%3,%4,%5" : "=r" (rA) \ + : "0" (rA), "r" (rS), "n" (cnt), "n" (mb), "n" (me)); \ + /*return*/ rA;}) + +/* + * __rlwinm - Rotate Left Word Immediate then AND with Mask + * + * int __rlwinm(long, int, int, int); + */ +#define __rlwinm(rS, cnt, mb, me) \ + ({ unsigned int __ppc_i_val; \ + __asm__ ("rlwinm %0,%1,%2,%3,%4" : "=r" (__ppc_i_val) \ + : "r" (rS), "n" (cnt), "n" (mb), "n" (me)); \ + /*return*/ __ppc_i_val;}) + +/* + * __rlwnm - Rotate Left Word then AND with Mask + * + * int __rlwnm(long, int, int, int); + */ +#define __rlwnm(value, leftRotateBits, maskStart, maskEnd) \ + ({ unsigned int __ppc_i_result; \ + __asm__ ("rlwnm %0, %1, %2, %3, %4" : "=r" (__ppc_i_result) : \ + "r" (value), "r" (leftRotateBits), "n" (maskStart), "n" (maskEnd)); \ + /*return */ __ppc_i_result; }) + + +/******************************************************************* + * Data Cache Manipulation * + *******************************************************************/ + +/* + * --- Data Cache Block instructions --- + * + * Please see Motorola's "The Programming Environments for 32-Bit + * Microprocessors" for a description of what these do. + * + * Parameter descriptions: + * + * base starting address for figuring out where the + * cacheline is + * + * index byte count to be added to the base address for + * purposes of calculating the effective address + * of the cacheline to be operated on. + * + * Effective Address of cacheline to be manipulated = + * (char*) base + index + * + * WARNING: The size and alignment of cachelines are subject to + * change on future processors! Cachelines are 32 bytes in + * size and are aligned to 32 bytes on PowerPC 601, 603, 604, + * 750, 7400, 7410, 7450, and 7455. + * + */ + +/* + * __dcba - Data Cache Block Allocate + * + * void __dcba(void *, int) + * + * WARNING: dcba is a valid instruction only on PowerPC 7400, 7410, + * 7450 and 7455. 
+ */ +#define __dcba(base, index) \ + __asm__ ("dcba %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") + +/* + * __dcbf - Data Cache Block Flush + * + * void __dcbf(void *, int); + */ +#define __dcbf(base, index) \ + __asm__ ("dcbf %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") + +/* + * __dcbst - Data Cache Block Store + * + * void __dcbst(void *, int); + */ +#define __dcbst(base, index) \ + __asm__ ("dcbst %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") + +/* + * __dcbt - Data Cache Block Touch + * + * void __dcbt(void *, int); + */ +#define __dcbt(base, index) \ + __asm__ ("dcbt %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") + +/* + * __dcbtst - Data Cache Block Touch for Store + * + * void __dcbtst(void *, int); + */ +#define __dcbtst(base, index) \ + __asm__ ("dcbtst %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") + +/* + * __dcbzl - Data Cache Block Set to Zero + * + * void __dcbzl(void *, int); + */ +#define __dcbzl(base, index) \ + __asm__ ("dcbzl %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") + +/* + * __dcbz - Data Cache Block Set to Zero (32-bytes only) + * + * WARNING: this is for legacy purposes only + * + * void __dcbz(void *, int); + */ +#define __dcbz(base, index) \ + __asm__ ("dcbz %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") + + +/******************************************************************* + * Setting the Floating-Point Environment * + *******************************************************************/ + +/* + * __setflm - Set Floating-point Mode + * + * Sets the FPSCR (floating-point status and control register), + * returning the original value. + * + * ??? CW: float __setflm(float); + */ +static inline double __setflm (double newflm) __attribute__((always_inline)); +static inline double +__setflm(double newflm) +{ + double original; + + __asm__ ("mffs %0" + /* outputs: */ : "=f" (original)); + __asm__ ("mtfsf 255,%0" + /* outputs: */ : /* none */ + /* inputs: */ : "f" (newflm)); + return original; +} + + +/******************************************************************* + * Math Functions * + *******************************************************************/ + +/* + * __fabs - Floating-Point Absolute Value + */ +static inline double __fabs (double value) __attribute__((always_inline)); +static inline double +__fabs (double value) +{ + double result; + __asm__ ("fabs %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (value)); + return result; +} + +/* + * __fnabs - Floating Negative Absolute Value + */ +static inline double __fnabs (double b) __attribute__((always_inline)); +static inline double +__fnabs (double b) +{ + double result; + __asm__ ("fnabs %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (b)); + return result; +} + +/* + * fctiw - Floating Convert to Integer Word + * + * Convert the input value to a signed long and place in the low 32 + * bits of the FP register. Clip to LONG_MIN or LONG_MAX if the FP + * value exceeds the range representable by a long. Use the rounding + * mode indicated in the FPSCR. 
+ */ +static inline double __fctiw (double b) __attribute__((always_inline)); +static inline double +__fctiw (double b) +{ + double result; + __asm__ ("fctiw %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (b)); + return result; +} + +/* + * fctiwz - Floating Convert to Integer Word with Round toward Zero + * + * Convert the input value to a signed long and place in the low 32 + * bits of the FP register. Clip to LONG_MIN or LONG_MAX if the FP + * value exceeds the range representable by a long. + */ +static inline double __fctiwz (double b) __attribute__((always_inline)); +static inline double +__fctiwz (double b) +{ + double result; + __asm__ ("fctiwz %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (b)); + return result; +} + +/* + * fctidz - Floating Convert to Integer Double Word with Round toward Zero + * + * Convert the input value to a signed 64-bit int and place in the FP + * destination register. Clip to LLONG_MIN (-2**63) or LLONG_MAX (2**63-1) + * if the FP value exceeds the range representable by a int64_t. + * + * WARNING: fctidz is a valid instruction only on 64-bit PowerPC + */ +static inline double __fctidz (double b) __attribute__((always_inline)); +static inline double +__fctidz (double b) +{ + double result; + __asm__ ("fctidz %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (b)); + return result; +} + +/* + * fctid - Floating Convert to Integer Double Word + * + * Convert the input value to a signed 64-bit int and place in the FP + * destination register. Clip to LLONG_MIN (-2**63) or LLONG_MAX (2**63-1) + * if the FP value exceeds the range representable by a int64_t. Use the + * rounding mode indicated in the FPSCR. + * + * WARNING: fctid is a valid instruction only on 64-bit PowerPC + */ +static inline double __fctid (double b) __attribute__((always_inline)); +static inline double +__fctid (double b) +{ + double result; + __asm__ ("fctid %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (b)); + return result; +} + +/* + * fcfid - Floating Convert From Integer Double Word + * + * Convert the 64-bit signed integer input value to a 64-bit FP value. + * Use the rounding mode indicated in the FPSCR if the integer is out of + * double precision range. + * + * WARNING: fcfid is a valid instruction only on 64-bit PowerPC + */ +static inline double __fcfid (double b) __attribute__((always_inline)); +static inline double +__fcfid (double b) +{ + double result; + __asm__ ("fcfid %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (b)); + return result; +} + +/* + * fmadd - Floating Multiply-Add (Double-Precision) + * + * (a * c + b) double precision + */ +static inline double __fmadd (double a, double c, double b) __attribute__((always_inline)); +static inline double +__fmadd (double a, double c, double b) +{ + double result; + __asm__ ("fmadd %0, %1, %2, %3" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (a), "f" (c), "f" (b)); + return result; +} + +/* + * fmadds - Floating Multiply-Add Single + * + * (a * c + b) single precision + * + * Double precision arguments are used to prevent the compiler from + * issuing frsp instructions upstream. 
+ */ +static inline float __fmadds (double a, double c, double b) __attribute__((always_inline)); +static inline float +__fmadds (double a, double c, double b) +{ + float result; + __asm__ ("fmadds %0, %1, %2, %3" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (a), "f" (c), "f" (b)); + return result; +} + +/* + * fmsub - Floating Multiply-Subract (Double-Precision) + * + * (a * c - b) double precision + */ +static inline double __fmsub (double a, double c, double b) __attribute__((always_inline)); +static inline double +__fmsub (double a, double c, double b) +{ + double result; + __asm__ ("fmsub %0, %1, %2, %3" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (a), "f" (c), "f" (b)); + return result; +} + +/* + * fmsubs - Floating Multiply-Subract Single + * + * (a * c - b) single precision + * + * Double precision arguments are used to prevent the compiler from + * issuing frsp instructions upstream. + */ +static inline float __fmsubs (double a, double c, double b) __attribute__((always_inline)); +static inline float +__fmsubs (double a, double c, double b) +{ + float result; + __asm__ ("fmsubs %0, %1, %2, %3" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (a), "f" (c), "f" (b)); + return result; +} + +/* + * fmul - Floating Multiply (Double-Precision) + * + * (a * c) double precision + */ +static inline double __fmul (double a, double c) __attribute__((always_inline)); +static inline double +__fmul (double a, double c) +{ + double result; + __asm__ ("fmul %0, %1, %2" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (a), "f" (c)); + return result; +} + +/* + * fmuls - Floating Multiply Single + * + * (a * c) single precision + * + * Double precision arguments are used to prevent the compiler from + * issuing frsp instructions upstream. + */ +static inline float __fmuls (double a, double c) __attribute__((always_inline)); +static inline float +__fmuls (double a, double c) +{ + float result; + __asm__ ("fmuls %0, %1, %2" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (a), "f" (c)); + return result; +} + +/* + * __fnmadd - Floating Negative Multiply-Add (Double-Precision) + * + * -(a * c + b) double precision + */ +static inline double __fnmadd (double a, double c, double b) __attribute__((always_inline)); +static inline double +__fnmadd (double a, double c, double b) +{ + double result; + __asm__ ("fnmadd %0, %1, %2, %3" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (a), "f" (c), "f" (b)); + return result; +} + +/* + * __fnmadds - Floating Negative Multiply-Add Single + * + * -(a * c + b) single precision + * + * Double precision arguments are used to prevent the compiler from + * issuing frsp instructions upstream. 
+ */ +static inline float __fnmadds (double a, double c, double b) __attribute__((always_inline)); +static inline float +__fnmadds (double a, double c, double b) +{ + float result; + __asm__ ("fnmadds %0, %1, %2, %3" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (a), "f" (c), "f" (b)); + return result; +} + +/* + * __fnmsub - Floating Negative Multiply-Subtract (Double-Precision) + * + * -(a * c - B) double precision + */ +static inline double __fnmsub (double a, double c, double b) __attribute__((always_inline)); +static inline double +__fnmsub (double a, double c, double b) +{ + double result; + __asm__ ("fnmsub %0, %1, %2, %3" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (a), "f" (c), "f" (b)); + return result; +} + +/* + * __fnmsubs - Floating Negative Multiply-Subtract Single + * + * -(a * c - b) single precision + * + * Double precision arguments are used to prevent the compiler from + * issuing frsp instructions upstream. + */ +static inline float __fnmsubs (double a, double c, double b) __attribute__((always_inline)); +static inline float +__fnmsubs (double a, double c, double b) +{ + float result; + __asm__ ("fnmsubs %0, %1, %2, %3" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (a), "f" (c), "f" (b)); + return result; +} + +/* + * __fres - Floating Reciprocal Estimate + * + * Produces a double precision result with 5 bits of accuracy. + * Note: not valid on the PowerPC 601. + * + * ??? CW: float __fres(float) + */ +static inline float __fres (float val) __attribute__((always_inline)); +static inline float +__fres (float val) +{ + float estimate; + __asm__ ("fres %0,%1" + /* outputs: */ : "=f" (estimate) + /* inputs: */ : "f" (val)); + return estimate; +} + +/* + * __frsp - Floating Round to Single-Precision + */ +static inline float __frsp (double d) __attribute__((always_inline)); +static inline float +__frsp (double d) +{ + float result; + __asm__ ("frsp %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (d)); + return result; +} + +/* + * __frsqrte - Floating Reciprocal Square Root Estimate + * + * Note: not valid on the PowerPC 601. + */ +static inline double __frsqrte (double val) __attribute__((always_inline)); +static inline double +__frsqrte (double val) +{ + double estimate; + + __asm__ ("frsqrte %0,%1" + /* outputs: */ : "=f" (estimate) + /* inputs: */ : "f" (val)); + return estimate; +} + +/* + * __frsqrtes - Floating Reciprocal Square Root Estimate Single + */ +static inline float __frsqrtes (double f) __attribute__((always_inline)); +static inline float +__frsqrtes (double f) +{ + float result; + __asm__ ("frsqrte %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (f)); + return result; +} + +/* + * __fsel - Floating Select + * + * if (test >= 0) return a; else return b; + * + * Note: not valid on the PowerPC 601. + */ +static inline double __fsel (double test, double a, double b) __attribute__((always_inline)); +static inline double +__fsel (double test, double a, double b) +{ + double result; + __asm__ ("fsel %0,%1,%2,%3" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (test), "f" (a), "f" (b)); + return result; +} + +/* + * __fsels - Floating Select (Single-Precision variant) + * + * An artificial single precision variant of fsel. This produces the + * same results as fsel, but is useful because the result is cast as + * a float, discouraging the compiler from issuing a frsp instruction + * afterward. 
+ */ +static inline float __fsels (double test, double a, double b) __attribute__((always_inline)); +static inline float +__fsels (double test, double a, double b) +{ + float result; + __asm__ ("fsel %0,%1,%2,%3" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (test), "f" (a), "f" (b)); + return result; +} + +/* + * __fsqrt - Floating-Point Square Root (Double-Precision) + * + * WARNING: Illegal instruction for PowerPC 603, 604, 750, 7400, 7410, + * 7450, and 7455 + */ +static inline double __fsqrt (double b) __attribute__((always_inline)); +static inline double +__fsqrt(double d) +{ + double result; + __asm__ ("fsqrt %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (d)); + return result; +} + +/* + * __fsqrts - Floating-Point Square Root Single-Precision + * + * WARNING: Illegal instruction for PowerPC 603, 604, 750, 7400, 7410, + * 7450, and 7455 + */ +static inline float __fsqrts (float f) __attribute__((always_inline)); +static inline float +__fsqrts (float f) +{ + float result; + __asm__ ("fsqrts %0, %1" + /* outputs: */ : "=f" (result) + /* inputs: */ : "f" (f)); + return result; +} + +/* + * __mulhw - Multiply High Word + */ +static inline int __mulhw (int a, int b) __attribute__((always_inline)); +static inline int +__mulhw (int a, int b) +{ + int result; + __asm__ ("mulhw %0, %1, %2" + /* outputs: */ : "=r" (result) + /* inputs: */ : "r" (a), "r"(b)); + return result; +} + +/* + * __mulhwu - Multiply High Word Unsigned + */ +static inline unsigned int __mulhwu (unsigned int a, unsigned int b) __attribute__((always_inline)); +static inline unsigned int +__mulhwu (unsigned int a, unsigned int b) +{ + unsigned int result; + __asm__ ("mulhwu %0, %1, %2" + /* outputs: */ : "=r" (result) + /* inputs: */ : "r" (a), "r"(b)); + return result; +} + +/* + * __stfiwx - Store Floating-Point as Integer Word Indexed + * + * void x(int, void *, int); + */ +#define __stfiwx(value, base, index) \ + __asm__ ("stfiwx %0, %1, %2" : /*no result*/ \ + : "f" (value), "b%" (index), "r" (base) : "memory") + + +/******************************************************************* + * Miscellaneous Functions * + *******************************************************************/ + +/* + * __nop - no operation (PowerPC preferred form) + * + * void __nop(); + */ +#define __nop() \ + __asm__ ("ori 0,0,0") + +/* + * __icbi - Instruction Cache Block Invalidate + * + * void __icbi(void *, int); + */ +#define __icbi(base, index) \ + __asm__ ("icbi %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") + +/* + * __mffs - Move from FPSCR + */ +static inline double __mffs (void) __attribute__((always_inline)); +static inline double +__mffs (void) +{ + double result; + __asm__ volatile ("mffs %0" + /* outputs: */ : "=f" (result)); + return result; +} + +/* + * __mfspr - Move from Special Purpose Register + * + * int __mfspr(int); + */ +#define __mfspr(spr) \ + __extension__ ({ long __ppc_i_mfsprResult; \ + __asm__ volatile ("mfspr %0, %1" : "=r" (__ppc_i_mfsprResult) : "n" (spr)); \ + /*return*/ __ppc_i_mfsprResult; }) + +/* + * __mtfsf - Move to SPSCR Fields + * + * void __mtfsf(int, int); + */ +#define __mtfsf(mask, newValue) \ + __asm__ volatile ("mtfsf %0, %1" : : "n" (mask), "f" (newValue)) + +/* + * __mtspr - Move to Special Purpose Register + * + * __mtspr x(int, int); + */ +#define __mtspr(spr, value) \ + __asm__ volatile ("mtspr %0, %1" : : "n" (spr), "r" (value)) + +/* + * __OSReadSwapSInt16 + * + * lhbrx for signed shorts. 
This will do the required sign + * extension after load and byteswap. + */ +static inline signed short __OSReadSwapSInt16 (signed short *base, int index) __attribute__((always_inline)); +static inline signed short +__OSReadSwapSInt16 (signed short *base, int index) +{ + signed long result; + __asm__ volatile ("lhbrx %0, %1, %2" + /* outputs: */ : "=r" (result) + /* inputs: */ : "b%" (index), "r" (base) + /* clobbers: */ : "memory"); + return result; +} + +/* + * __OSReadSwapUInt16 + */ +static inline unsigned short __OSReadSwapUInt16 (volatile void *base, int inex) __attribute__((always_inline)); +static inline unsigned short +__OSReadSwapUInt16 (volatile void *base, int index) +{ + unsigned long result; + __asm__ volatile ("lhbrx %0, %1, %2" + /* outputs: */ : "=r" (result) + /* inputs: */ : "b" (index), "r" (base) + /* clobbers: */ : "memory"); + return result; +} + +/* + * __astrcmp - assembly strcmp + */ +static inline int astrcmp (const char *in_s1, const char *in_s2) __attribute__((always_inline)); +static inline int +astrcmp (const char *in_s1, const char *in_s2) +{ + int result, temp; + register const char *s1 = in_s1 - 1; + register const char *s2 = in_s2 - 1; + + __asm__ ("1:lbzu %0,1(%1)\n" + "\tcmpwi cr1,%0,0\n" + "\tlbzu %3,1(%2)\n" + "\tsubf. %0,%3,%0\n" + "\tbeq- cr1,2f\n" + "\tbeq+ 1b\n2:" + /* outputs: */ : "=&r" (result), "+b" (s1), "+b" (s2), "=r" (temp) + /* inputs: */ : + /* clobbers: */ : "cr0", "cr1", "memory"); + + return result; + + /* + * "=&r" (result) means: 'result' is written on (the '='), it's any GP + * register (the 'r'), and it must not be the same as + * any of the input registers (the '&'). + * "+b" (s1) means: 's1' is read from and written to (the '+'), + * and it must be a base GP register (i.e., not R0.) + * "=r" (temp) means: 'temp' is any GP reg and it's only written to. + * + * "memory" in the 'clobbers' section means that gcc will make + * sure that anything that should be in memory IS there + * before calling this routine. + */ +} + +#endif /* (defined(__ppc__) || defined(__ppc64__)) && ! defined(__MWERKS__) */ + +#endif /* _PPC_INTRINSICS_H_ */ diff --git a/include/gcc/darwin/4.2/stdarg.h b/include/gcc/darwin/4.2/stdarg.h new file mode 100644 index 0000000..e12e163 --- /dev/null +++ b/include/gcc/darwin/4.2/stdarg.h @@ -0,0 +1,6 @@ +/* This file is public domain. */ +#ifdef __MWERKS__ +#include "mw_stdarg.h" +#else +#error "This header only supports __MWERKS__." +#endif diff --git a/include/gcc/darwin/4.2/stdint.h b/include/gcc/darwin/4.2/stdint.h new file mode 100644 index 0000000..1126b56 --- /dev/null +++ b/include/gcc/darwin/4.2/stdint.h @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2000, 2001, 2003, 2004, 2008 Apple Computer, Inc. + * All rights reserved. 
+ */ + +#ifndef _STDINT_H_ +#define _STDINT_H_ + +#if __LP64__ +#define __WORDSIZE 64 +#else +#define __WORDSIZE 32 +#endif + +/* from ISO/IEC 988:1999 spec */ + +/* 7.18.1.1 Exact-width integer types */ +#ifndef _INT8_T +#define _INT8_T +typedef signed char int8_t; +#endif /*_INT8_T */ + +#ifndef _INT16_T +#define _INT16_T +typedef short int16_t; +#endif /* _INT16_T */ + +#ifndef _INT32_T +#define _INT32_T +typedef int int32_t; +#endif /* _INT32_T */ + +#ifndef _INT64_T +#define _INT64_T +typedef long long int64_t; +#endif /* _INT64_T */ + +#ifndef _UINT8_T +#define _UINT8_T +typedef unsigned char uint8_t; +#endif /*_UINT8_T */ + +#ifndef _UINT16_T +#define _UINT16_T +typedef unsigned short uint16_t; +#endif /* _UINT16_T */ + +#ifndef _UINT32_T +#define _UINT32_T +typedef unsigned int uint32_t; +#endif /* _UINT32_T */ + +#ifndef _UINT64_T +#define _UINT64_T +typedef unsigned long long uint64_t; +#endif /* _UINT64_T */ + +/* 7.18.1.2 Minimum-width integer types */ +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + + +/* 7.18.1.3 Fastest-width integer types */ +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + + +/* 7.18.1.4 Integer types capable of holding object pointers */ + +#ifndef _INTPTR_T +#define _INTPTR_T +typedef long intptr_t; +#endif /* _INTPTR_T */ + +#ifndef _UINTPTR_T +#define _UINTPTR_T +typedef unsigned long uintptr_t; +#endif /* _UINTPTR_T */ + + +/* 7.18.1.5 Greatest-width integer types */ +#ifndef _INTMAX_T +#define _INTMAX_T +#ifdef __INTMAX_TYPE__ +typedef __INTMAX_TYPE__ intmax_t; +#else /* __INTMAX_TYPE__ */ +typedef long long intmax_t; +#endif /* __INTMAX_TYPE__ */ +#endif /* _INTMAX_T */ + +#ifndef _UINTMAX_T +#define _UINTMAX_T +#ifdef __UINTMAX_TYPE__ +typedef __UINTMAX_TYPE__ uintmax_t; +#else /* __UINTMAX_TYPE__ */ +typedef unsigned long long uintmax_t; +#endif /* __UINTMAX_TYPE__ */ +#endif /* _UINTMAX_T */ + +/* 7.18.2 Limits of specified-width integer types: + * These #defines specify the minimum and maximum limits + * of each of the types declared above. + */ + + +/* 7.18.2.1 Limits of exact-width integer types */ +#define INT8_MAX 127 +#define INT16_MAX 32767 +#define INT32_MAX 2147483647 +#define INT64_MAX 9223372036854775807LL + +#define INT8_MIN -128 +#define INT16_MIN -32768 + /* + Note: the literal "most negative int" cannot be written in C -- + the rules in the standard (section 6.4.4.1 in C99) will give it + an unsigned type, so INT32_MIN (and the most negative member of + any larger signed type) must be written via a constant expression. 
+ */ +#define INT32_MIN (-INT32_MAX-1) +#define INT64_MIN (-INT64_MAX-1) + +#define UINT8_MAX 255 +#define UINT16_MAX 65535 +#define UINT32_MAX 4294967295U +#define UINT64_MAX 18446744073709551615ULL + +/* 7.18.2.2 Limits of minimum-width integer types */ +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST64_MIN INT64_MIN + +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MAX INT64_MAX + +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +/* 7.18.2.3 Limits of fastest minimum-width integer types */ +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST64_MIN INT64_MIN + +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MAX INT64_MAX + +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +/* 7.18.2.4 Limits of integer types capable of holding object pointers */ + +#if __WORDSIZE == 64 +#define INTPTR_MIN INT64_MIN +#define INTPTR_MAX INT64_MAX +#else +#define INTPTR_MIN INT32_MIN +#define INTPTR_MAX INT32_MAX +#endif + +#if __WORDSIZE == 64 +#define UINTPTR_MAX UINT64_MAX +#else +#define UINTPTR_MAX UINT32_MAX +#endif + +/* 7.18.2.5 Limits of greatest-width integer types */ +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX + +#define UINTMAX_MAX UINT64_MAX + +/* 7.18.3 "Other" */ +#if __WORDSIZE == 64 +#define PTRDIFF_MIN INT64_MIN +#define PTRDIFF_MAX INT64_MAX +#else +#define PTRDIFF_MIN INT32_MIN +#define PTRDIFF_MAX INT32_MAX +#endif + +/* We have no sig_atomic_t yet, so no SIG_ATOMIC_{MIN,MAX}. + Should end up being {-127,127} or {0,255} ... or bigger. + My bet would be on one of {U}INT32_{MIN,MAX}. */ + +#if __WORDSIZE == 64 +#define SIZE_MAX UINT64_MAX +#else +#define SIZE_MAX UINT32_MAX +#endif + +#ifndef WCHAR_MAX +# ifdef __WCHAR_MAX__ +# define WCHAR_MAX __WCHAR_MAX__ +# else +# define WCHAR_MAX 0x7fffffff +# endif +#endif + +/* WCHAR_MIN should be 0 if wchar_t is an unsigned type and + (-WCHAR_MAX-1) if wchar_t is a signed type. Unfortunately, + it turns out that -fshort-wchar changes the signedness of + the type. */ +#ifndef WCHAR_MIN +# if WCHAR_MAX == 0xffff +# define WCHAR_MIN 0 +# else +# define WCHAR_MIN (-WCHAR_MAX-1) +# endif +#endif + +#define WINT_MIN INT32_MIN +#define WINT_MAX INT32_MAX + +#define SIG_ATOMIC_MIN INT32_MIN +#define SIG_ATOMIC_MAX INT32_MAX + +/* 7.18.4 Macros for integer constants */ +#define INT8_C(v) (v) +#define INT16_C(v) (v) +#define INT32_C(v) (v) +#define INT64_C(v) (v ## LL) + +#define UINT8_C(v) (v ## U) +#define UINT16_C(v) (v ## U) +#define UINT32_C(v) (v ## U) +#define UINT64_C(v) (v ## ULL) + +#define INTMAX_C(v) (v ## LL) +#define UINTMAX_C(v) (v ## ULL) + +#endif /* _STDINT_H_ */ diff --git a/include/gcc/darwin/4.2/varargs.h b/include/gcc/darwin/4.2/varargs.h new file mode 100644 index 0000000..83188ca --- /dev/null +++ b/include/gcc/darwin/4.2/varargs.h @@ -0,0 +1,6 @@ +/* This file is public domain. */ +#ifdef __MWERKS__ +#include "mw_varargs.h" +#else +#error "This header only supports __MWERKS__." 
+#endif diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/crt3.o b/lib/gcc/i686-apple-darwin10/4.2.1/crt3.o new file mode 100644 index 0000000..8609d11 Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/crt3.o differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/README b/lib/gcc/i686-apple-darwin10/4.2.1/include/README new file mode 100644 index 0000000..7086a77 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/README @@ -0,0 +1,14 @@ +This README file is copied into the directory for GCC-only header files +when fixincludes is run by the makefile for GCC. + +Many of the files in this directory were automatically edited from the +standard system header files by the fixincludes process. They are +system-specific, and will not work on any other kind of system. They +are also not part of GCC. The reason we have to do this is because +GCC requires ANSI C headers and many vendors supply ANSI-incompatible +headers. + +Because this is an automated process, sometimes headers get "fixed" +that do not, strictly speaking, need a fix. As long as nothing is broken +by the process, it is just an unfortunate collateral inconvenience. +We would like to rectify it, if it is not "too inconvenient". diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/ammintrin.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/ammintrin.h new file mode 100644 index 0000000..8a466d9 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/ammintrin.h @@ -0,0 +1,106 @@ +/* APPLE LOCAL file 5612787 mainline sse4 */ +/* Copyright (C) 2007 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. 
 */
+
+/* Implemented from the specification included in the AMD Programmers
+   Manual Update, version 2.x */
+
+#ifndef _AMMINTRIN_H_INCLUDED
+#define _AMMINTRIN_H_INCLUDED
+
+#ifndef __SSE4A__
+# error "SSE4A instruction set not enabled"
+#else
+
+/* We need definitions from the SSE3, SSE2 and SSE header files*/
+#include <pmmintrin.h>
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+/* APPLE LOCAL begin radar 5618945 */
+#undef __STATIC_INLINE
+#ifdef __GNUC_STDC_INLINE__
+#define __STATIC_INLINE __inline
+#else
+#define __STATIC_INLINE static __inline
+#endif
+/* APPLE LOCAL end radar 5618945 */
+
+__STATIC_INLINE void __attribute__((__always_inline__))
+_mm_stream_sd (double * __P, __m128d __Y)
+{
+  __builtin_ia32_movntsd (__P, (__v2df) __Y);
+}
+
+__STATIC_INLINE void __attribute__((__always_inline__))
+_mm_stream_ss (float * __P, __m128 __Y)
+{
+  __builtin_ia32_movntss (__P, (__v4sf) __Y);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_extract_si64 (__m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_extrq ((__v2di) __X, (__v16qi) __Y);
+}
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L)
+{
+  return (__m128i) __builtin_ia32_extrqi ((__v2di) __X, __I, __L);
+}
+#else
+#define _mm_extracti_si64(X, I, L) \
+  ((__m128i) __builtin_ia32_extrqi ((__v2di)(X), I, L))
+#endif
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_insert_si64 (__m128i __X,__m128i __Y)
+{
+  return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y);
+}
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L)
+{
+  return (__m128i) __builtin_ia32_insertqi ((__v2di)__X, (__v2di)__Y, __I, __L);
+}
+#else
+#define _mm_inserti_si64(X, Y, I, L) \
+  ((__m128i) __builtin_ia32_insertqi ((__v2di)(X), (__v2di)(Y), I, L))
+#endif
+
+#endif /* __SSE4A__ */
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+#endif /* _AMMINTRIN_H_INCLUDED */
diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/decfloat.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/decfloat.h
new file mode 100644
index 0000000..03e0a7b
--- /dev/null
+++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/decfloat.h
@@ -0,0 +1,108 @@
+/* Copyright (C) 2005 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING.  If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA.  */
+
+/* As a special exception, if you include this header file into source
+   files compiled by GCC, this header file does not by itself cause
+   the resulting executable to be covered by the GNU General Public
+   License.
This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * Draft C Extension to support decimal floating-pointing arithmetic: + * Characteristics of decimal floating types + */ + +#ifndef _DECFLOAT_H___ +#define _DECFLOAT_H___ + +/* Number of base-FLT_RADIX digits in the significand, p. */ +#undef DEC32_MANT_DIG +#undef DEC64_MANT_DIG +#undef DEC128_MANT_DIG +#define DEC32_MANT_DIG __DEC32_MANT_DIG__ +#define DEC64_MANT_DIG __DEC64_MANT_DIG__ +#define DEC128_MANT_DIG __DEC128_MANT_DIG__ + +/* Minimum exponent. */ +#undef DEC32_MIN_EXP +#undef DEC64_MIN_EXP +#undef DEC128_MIN_EXP +#define DEC32_MIN_EXP __DEC32_MIN_EXP__ +#define DEC64_MIN_EXP __DEC64_MIN_EXP__ +#define DEC128_MIN_EXP __DEC128_MIN_EXP__ + +/* Maximum exponent. */ +#undef DEC32_MAX_EXP +#undef DEC64_MAX_EXP +#undef DEC128_MAX_EXP +#define DEC32_MAX_EXP __DEC32_MAX_EXP__ +#define DEC64_MAX_EXP __DEC64_MAX_EXP__ +#define DEC128_MAX_EXP __DEC128_MAX_EXP__ + +/* Maximum representable finite decimal floating-point number + (there are 6, 15, and 33 9s after the decimal points respectively). */ +#undef DEC32_MAX +#undef DEC64_MAX +#undef DEC128_MAX +#define DEC32_MAX __DEC32_MAX__ +#define DEC64_MAX __DEC64_MAX__ +#define DEC128_MAX __DEC128_MAX__ + +/* The difference between 1 and the least value greater than 1 that is + representable in the given floating point type. */ +#undef DEC32_EPSILON +#undef DEC64_EPSILON +#undef DEC128_EPSILON +#define DEC32_EPSILON __DEC32_EPSILON__ +#define DEC64_EPSILON __DEC64_EPSILON__ +#define DEC128_EPSILON __DEC128_EPSILON__ + +/* Minimum normalized positive floating-point number. */ +#undef DEC32_MIN +#undef DEC64_MIN +#undef DEC128_MIN +#define DEC32_MIN __DEC32_MIN__ +#define DEC64_MIN __DEC64_MIN__ +#define DEC128_MIN __DEC128_MIN__ + +/* Minimum denormalized positive floating-point number. */ +#undef DEC32_DEN +#undef DEC64_DEN +#undef DEC128_DEN +#define DEC32_DEN __DEC32_DEN__ +#define DEC64_DEN __DEC64_DEN__ +#define DEC128_DEN __DEC128_DEN__ + +/* The floating-point expression evaluation method. + -1 indeterminate + 0 evaluate all operations and constants just to the range and + precision of the type + 1 evaluate operations and constants of type _Decimal32 + and _Decimal64 to the range and precision of the _Decimal64 + type, evaluate _Decimal128 operations and constants to the + range and precision of the _Decimal128 type; + 2 evaluate all operations and constants to the range and + precision of the _Decimal128 type. +*/ + +#undef DECFLT_EVAL_METHOD +#define DECFLT_EVAL_METHOD __DECFLT_EVAL_METHOD__ + +#endif /* _DECFLOAT_H___ */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/emmintrin.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/emmintrin.h new file mode 100644 index 0000000..857ea6f --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/emmintrin.h @@ -0,0 +1,1981 @@ +/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */ +/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING.  If not, write to
+   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+   Boston, MA 02110-1301, USA.  */
+
+/* As a special exception, if you include this header file into source
+   files compiled by GCC, this header file does not by itself cause
+   the resulting executable to be covered by the GNU General Public
+   License.  This exception does not however invalidate any other
+   reasons why the executable file might be covered by the GNU General
+   Public License.  */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.  */
+
+#ifndef _EMMINTRIN_H_INCLUDED
+#define _EMMINTRIN_H_INCLUDED
+
+#ifdef __SSE2__
+#include <xmmintrin.h>
+
+/* SSE2 */
+typedef double __v2df __attribute__ ((__vector_size__ (16)));
+typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef int __v4si __attribute__ ((__vector_size__ (16)));
+typedef short __v8hi __attribute__ ((__vector_size__ (16)));
+typedef char __v16qi __attribute__ ((__vector_size__ (16)));
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+   vector types, and their scalar components.  */
+typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
+typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
+
+/* Create a selector for use with the SHUFPD instruction.  */
+#define _MM_SHUFFLE2(fp1,fp0) \
+ (((fp1) << 1) | (fp0))
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+/* APPLE LOCAL begin radar 5618945 */
+#undef __STATIC_INLINE
+#ifdef __GNUC_STDC_INLINE__
+#define __STATIC_INLINE __inline
+#else
+#define __STATIC_INLINE static __inline
+#endif
+/* APPLE LOCAL end radar 5618945 */
+
+/* APPLE LOCAL begin radar 4152603 */
+/* Create a vector with element 0 as F and the rest zero.  */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_sd (double __F)
+{
+  return __extension__ (__m128d){ __F, 0 };
+}
+
+/* Create a vector with both elements equal to F.  */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_pd (double __F)
+{
+  return __extension__ (__m128d){ __F, __F };
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_pd1 (double __F)
+{
+  return _mm_set1_pd (__F);
+}
+
+/* Create a vector with the lower value X and upper value W.  */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_pd (double __W, double __X)
+{
+  return __extension__ (__m128d){ __X, __W };
+}
+
+/* Create a vector with the lower value W and upper value X.  */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setr_pd (double __W, double __X)
+{
+  return __extension__ (__m128d){ __W, __X };
+}
+
+/* Create a vector of zeros.
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setzero_pd (void) +{ + return __extension__ (__m128d){ 0.0, 0.0 }; +} + +/* Sets the low DPFP value of A from the low value of B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_move_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B); +} + +/* Load two DPFP values from P. The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_pd (double const *__P) +{ + return *(__m128d *)__P; +} + +/* Load two DPFP values from P. The address need not be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadu_pd (double const *__P) +{ + return __builtin_ia32_loadupd (__P); +} + +/* Create a vector with all two elements equal to *P. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load1_pd (double const *__P) +{ + return _mm_set1_pd (*__P); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_sd (double const *__P) +{ + return _mm_set_sd (*__P); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_pd1 (double const *__P) +{ + return _mm_load1_pd (__P); +} + +/* Load two DPFP values in reverse order. The address must be aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadr_pd (double const *__P) +{ + __m128d __tmp = _mm_load_pd (__P); + return __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1)); +} + +/* Store two DPFP values. The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_pd (double *__P, __m128d __A) +{ + *(__m128d *)__P = __A; +} + +/* Store two DPFP values. The address need not be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storeu_pd (double *__P, __m128d __A) +{ + __builtin_ia32_storeupd (__P, __A); +} + +/* Stores the lower DPFP value. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_sd (double *__P, __m128d __A) +{ + *__P = __builtin_ia32_vec_ext_v2df (__A, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE double __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsd_f64 (__m128d __A) +{ + return __builtin_ia32_vec_ext_v2df (__A, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storel_pd (double *__P, __m128d __A) +{ + _mm_store_sd (__P, __A); +} + +/* Stores the upper DPFP value. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storeh_pd (double *__P, __m128d __A) +{ + *__P = __builtin_ia32_vec_ext_v2df (__A, 1); +} + +/* Store the lower DPFP value across two words. + The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store1_pd (double *__P, __m128d __A) +{ + _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,0))); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_pd1 (double *__P, __m128d __A) +{ + _mm_store1_pd (__P, __A); +} + +/* Store two DPFP values in reverse order. The address must be aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storer_pd (double *__P, __m128d __A) +{ + _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,1))); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi128_si32 (__m128i __A) +{ + return __builtin_ia32_vec_ext_v4si ((__v4si)__A, 0); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi128_si64 (__m128i __A) +{ + return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi128_si64x (__m128i __A) +{ + return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0); +} +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_div_pd 
(__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_div_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sqrt_pd (__m128d __A) +{ + return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A); +} + +/* Return pair {sqrt (A[0), B[1]}. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sqrt_sd (__m128d __A, __m128d __B) +{ + __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B); + return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_and_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_andnot_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_or_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_xor_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end 
radar 5618945 */ +_mm_cmple_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpge_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpgepd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpneq_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnlt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnle_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpngt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnge_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpord_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpunord_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmple_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpltsd ((__v2df) __B, + (__v2df) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ 
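+/* Editor's illustrative sketch -- not part of the upstream Apple header:
+   the packed compares above (_mm_cmplt_pd and friends) return an all-ones
+   or all-zeros mask per element, which is typically combined with
+   _mm_and_pd/_mm_andnot_pd/_mm_or_pd to select values without branching.
+   The helper name below is hypothetical and the block is disabled.  */
+#if 0
+static __m128d
+select_smaller (__m128d __a, __m128d __b)
+{
+  __m128d __mask = _mm_cmplt_pd (__a, __b);        /* a < b ? ~0 : 0 per element */
+  return _mm_or_pd (_mm_and_pd (__mask, __a),      /* keep a where a < b         */
+                    _mm_andnot_pd (__mask, __b));  /* keep b elsewhere           */
+}
+#endif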
+__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpge_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmplesd ((__v2df) __B, + (__v2df) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpneq_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnlt_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnle_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpngt_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpnltsd ((__v2df) __B, + (__v2df) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnge_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpnlesd ((__v2df) __B, + (__v2df) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpord_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpunord_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comieq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comilt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comile_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comigt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comige_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comineq_sd (__m128d 
__A, __m128d __B) +{ + return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomieq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomilt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomile_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomigt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomige_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomineq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B); +} + +/* Create a vector of Qi, where i is the element number. */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_epi64x (long long __q1, long long __q0) +{ + return __extension__ (__m128i)(__v2di){ __q0, __q1 }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_epi64 (__m64 __q1, __m64 __q0) +{ + return _mm_set_epi64x ((long long)__q1, (long long)__q0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0) +{ + return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4, + short __q3, short __q2, short __q1, short __q0) +{ + return __extension__ (__m128i)(__v8hi){ + __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12, + char __q11, char __q10, char __q09, char __q08, + char __q07, char __q06, char __q05, char __q04, + char __q03, char __q02, char __q01, char __q00) +{ + return __extension__ (__m128i)(__v16qi){ + __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15 + }; +} + +/* APPLE LOCAL begin 4220129 */ +/* functions moved to end of file */ +/* APPLE LOCAL end 4220129 */ + +/* Create a vector of Qi, where i is the element number. + The parameter order is reversed from the _mm_set_epi* functions. 
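+   (Editor's note, not in the upstream header: for example,
+   _mm_setr_epi32 (0, 1, 2, 3) builds the same vector as
+   _mm_set_epi32 (3, 2, 1, 0), i.e. element 0 holds 0 and element 3 holds 3.)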
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_epi64 (__m64 __q0, __m64 __q1) +{ + return _mm_set_epi64 (__q1, __q0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3) +{ + return _mm_set_epi32 (__q3, __q2, __q1, __q0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3, + short __q4, short __q5, short __q6, short __q7) +{ + return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03, + char __q04, char __q05, char __q06, char __q07, + char __q08, char __q09, char __q10, char __q11, + char __q12, char __q13, char __q14, char __q15) +{ + return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08, + __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00); +} + +/* Create a vector with element 0 as *P and the rest zero. */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_si128 (__m128i const *__P) +{ + return *__P; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadu_si128 (__m128i const *__P) +{ + return (__m128i) __builtin_ia32_loaddqu ((char const *)__P); +} + +/* APPLE LOCAL begin 4099020 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadl_epi64 (__m128i const *__P) +{ + return (__m128i)__builtin_ia32_loadlv4si ((__v2si *)__P); +} +/* APPLE LOCAL end 4099020 */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_si128 (__m128i *__P, __m128i __B) +{ + *__P = __B; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storeu_si128 (__m128i *__P, __m128i __B) +{ + __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B); +} + +/* APPLE LOCAL begin 4099020 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storel_epi64 (__m128i *__P, __m128i __B) +{ + __builtin_ia32_storelv4si ((__v2si *)__P, __B); +} +/* APPLE LOCAL end 4099020 */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movepi64_pi64 (__m128i __B) +{ + return (__m64) __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movpi64_epi64 (__m64 __A) +{ + return _mm_set_epi64 ((__m64)0LL, __A); +} + +/* APPLE LOCAL begin 4099020 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_move_epi64 (__m128i __A) +{ + return 
(__m128i)__builtin_ia32_movqv4si ((__v4si)__A) ; +} +/* APPLE LOCAL end 4099020 */ + +/* Create a vector of zeros. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setzero_si128 (void) +{ + return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtepi32_pd (__m128i __A) +{ + return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtepi32_ps (__m128i __A) +{ + return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpd_epi32 (__m128d __A) +{ + return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpd_pi32 (__m128d __A) +{ + return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpd_ps (__m128d __A) +{ + return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttpd_epi32 (__m128d __A) +{ + return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttpd_pi32 (__m128d __A) +{ + return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpi32_pd (__m64 __A) +{ + return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtps_epi32 (__m128 __A) +{ + return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttps_epi32 (__m128 __A) +{ + return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtps_pd (__m128 __A) +{ + return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsd_si32 (__m128d __A) +{ + return __builtin_ia32_cvtsd2si ((__v2df) __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsd_si64 (__m128d __A) +{ + return __builtin_ia32_cvtsd2si64 ((__v2df) __A); +} + +/* Microsoft intrinsic. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsd_si64x (__m128d __A) +{ + return __builtin_ia32_cvtsd2si64 ((__v2df) __A); +} +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttsd_si32 (__m128d __A) +{ + return __builtin_ia32_cvttsd2si ((__v2df) __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttsd_si64 (__m128d __A) +{ + return __builtin_ia32_cvttsd2si64 ((__v2df) __A); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttsd_si64x (__m128d __A) +{ + return __builtin_ia32_cvttsd2si64 ((__v2df) __A); +} +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsd_ss (__m128 __A, __m128d __B) +{ + return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi32_sd (__m128d __A, int __B) +{ + return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_sd (__m128d __A, long long __B) +{ + return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B); +} + +/* Microsoft intrinsic. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64x_sd (__m128d __A, long long __B) +{ + return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B); +} +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtss_sd (__m128d __A, __m128 __B) +{ + return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B); +} + +/* APPLE LOCAL 5814283 */ +#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)(__A), (__v2df)(__B), (__C))) + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadh_pd (__m128d __A, double const *__B) +{ + return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadl_pd (__m128d __A, double const *__B) +{ + return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movemask_pd (__m128d __A) +{ + return __builtin_ia32_movmskpd ((__v2df)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packs_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packus_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i 
__attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubb128 
((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_madd_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mullo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_su32 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_epu32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B); +} + +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_epi32 (__m128i __A, int __B) +{ + 
return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_epi64 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B); +} +#else +#define _mm_slli_epi16(__A, __B) \ + ((__m128i)__builtin_ia32_psllwi128 ((__v8hi)(__A), __B)) +#define _mm_slli_epi32(__A, __B) \ + ((__m128i)__builtin_ia32_pslldi128 ((__v8hi)(__A), __B)) +#define _mm_slli_epi64(__A, __B) \ + ((__m128i)__builtin_ia32_psllqi128 ((__v8hi)(__A), __B)) +#endif + +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srai_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srai_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B); +} +#else +#define _mm_srai_epi16(__A, __B) \ + ((__m128i)__builtin_ia32_psrawi128 ((__v8hi)(__A), __B)) +#define _mm_srai_epi32(__A, __B) \ + ((__m128i)__builtin_ia32_psradi128 ((__v8hi)(__A), __B)) +#endif + +#if 0 +static __m128i __attribute__((__always_inline__)) +_mm_srli_si128 (__m128i __A, int __B) +{ + return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B * 8)); +} + +static __m128i __attribute__((__always_inline__)) +_mm_srli_si128 (__m128i __A, int __B) +{ + return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B * 8)); +} +#else +/* APPLE LOCAL begin 5919583 */ +#define _mm_srli_si128 (__m128i)__builtin_ia32_psrldqi128_byteshift +#define _mm_slli_si128 (__m128i)__builtin_ia32_pslldqi128_byteshift +/* APPLE LOCAL end 5919583 */ +#endif + +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_epi64 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B); +} +#else +#define _mm_srli_epi16(__A, __B) \ + ((__m128i)__builtin_ia32_psrlwi128 ((__v8hi)(__A), __B)) +#define _mm_srli_epi32(__A, __B) \ + ((__m128i)__builtin_ia32_psrldi128 ((__v4si)(__A), __B)) +#define _mm_srli_epi64(__A, __B) \ + ((__m128i)__builtin_ia32_psrlqi128 ((__v4si)(__A), __B)) +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sll_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllw128((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sll_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pslld128((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ 
+_mm_sll_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllq128((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sra_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sra_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_and_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_andnot_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_or_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_xor_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i 
__attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B); +} + +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_extract_epi16 (__m128i const __A, int const __N) +{ + return __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, __N); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_insert_epi16 (__m128i const __A, int const __D, int const __N) +{ + return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N); +} +#else +#define _mm_extract_epi16(A, N) \ + ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N))) +#define _mm_insert_epi16(A, D, N) \ + ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N))) +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movemask_epi8 (__m128i __A) +{ + return __builtin_ia32_pmovmskb128 ((__v16qi)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhi_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin 5814283 */ +#define 
_mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)(__A), __B)) +#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)(__A), __B)) +#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)(__A), __B)) +/* APPLE LOCAL end 5814283 */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C) +{ + __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_avg_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_avg_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sad_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_stream_si32 (int *__A, int __B) +{ + __builtin_ia32_movnti (__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_stream_si128 (__m128i *__A, __m128i __B) +{ + __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_stream_pd (double *__A, __m128d __B) +{ + __builtin_ia32_movntpd (__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_clflush (void const *__A) +{ + __builtin_ia32_clflush (__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_lfence (void) +{ + __builtin_ia32_lfence (); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mfence (void) +{ + __builtin_ia32_mfence (); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi32_si128 (int __A) +{ + return _mm_set_epi32 (0, 0, 0, __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_si128 (long long __A) +{ + return _mm_set_epi64x (0, __A); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64x_si128 (long long __A) +{ + return _mm_set_epi64x (0, __A); +} +#endif + +/* Casts between various SP, DP, INT vector types. Note that these do no + conversion of values, they just change the type. 
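+   For example, _mm_castps_si128 returns the same 128 bits reinterpreted as an
+   __m128i; no instructions are generated for any of these casts.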
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castpd_ps(__m128d __A) +{ + return (__m128) __A; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castpd_si128(__m128d __A) +{ + return (__m128i) __A; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castps_pd(__m128 __A) +{ + return (__m128d) __A; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castps_si128(__m128 __A) +{ + return (__m128i) __A; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castsi128_ps(__m128i __A) +{ + return (__m128) __A; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castsi128_pd(__m128i __A) +{ + return (__m128d) __A; +} +/* APPLE LOCAL end radar 4152603 */ + +/* APPLE LOCAL begin 4220129, 4286110 */ +/* Set all of the elements of the vector to A. */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_epi64x (long long __A) +{ + return _mm_set_epi64x (__A, __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_epi64 (__m64 __A) +{ + return _mm_set_epi64 (__A, __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_epi32 (int __A) +{ + return _mm_set_epi32 (__A, __A, __A, __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_epi16 (short __A) +{ + __m128i temp, temp2, temp3; + temp = _mm_cvtsi32_si128((int)__A); + temp2 = _mm_unpacklo_epi16(temp, temp); + temp3 = _mm_shuffle_epi32(temp2, 0); + return temp3; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_epi8 (char __A) +{ + __m128i temp, temp2, temp3, temp4; + temp = _mm_cvtsi32_si128 ((int)__A); + temp2 = _mm_unpacklo_epi8 (temp, temp); + temp3 = _mm_unpacklo_epi8 (temp2, temp2); + temp4 = _mm_shuffle_epi32 (temp3, 0); + return temp4; +} +/* APPLE LOCAL end 4220129, 4286110 */ + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +#endif /* __SSE2__ */ + +#endif /* _EMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/float.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/float.h new file mode 100644 index 0000000..1337f6b --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/float.h @@ -0,0 +1,164 @@ +/* Copyright (C) 2002 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. 
+ +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 5.2.4.2.2 Characteristics of floating types + */ + +#ifndef _FLOAT_H___ +#define _FLOAT_H___ + +/* Radix of exponent representation, b. */ +#undef FLT_RADIX +#define FLT_RADIX __FLT_RADIX__ + +/* Number of base-FLT_RADIX digits in the significand, p. */ +#undef FLT_MANT_DIG +#undef DBL_MANT_DIG +#undef LDBL_MANT_DIG +#define FLT_MANT_DIG __FLT_MANT_DIG__ +#define DBL_MANT_DIG __DBL_MANT_DIG__ +#define LDBL_MANT_DIG __LDBL_MANT_DIG__ + +/* Number of decimal digits, q, such that any floating-point number with q + decimal digits can be rounded into a floating-point number with p radix b + digits and back again without change to the q decimal digits, + + p * log10(b) if b is a power of 10 + floor((p - 1) * log10(b)) otherwise +*/ +#undef FLT_DIG +#undef DBL_DIG +#undef LDBL_DIG +#define FLT_DIG __FLT_DIG__ +#define DBL_DIG __DBL_DIG__ +#define LDBL_DIG __LDBL_DIG__ + +/* Minimum int x such that FLT_RADIX**(x-1) is a normalized float, emin */ +#undef FLT_MIN_EXP +#undef DBL_MIN_EXP +#undef LDBL_MIN_EXP +#define FLT_MIN_EXP __FLT_MIN_EXP__ +#define DBL_MIN_EXP __DBL_MIN_EXP__ +#define LDBL_MIN_EXP __LDBL_MIN_EXP__ + +/* Minimum negative integer such that 10 raised to that power is in the + range of normalized floating-point numbers, + + ceil(log10(b) * (emin - 1)) +*/ +#undef FLT_MIN_10_EXP +#undef DBL_MIN_10_EXP +#undef LDBL_MIN_10_EXP +#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__ +#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__ +#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__ + +/* Maximum int x such that FLT_RADIX**(x-1) is a representable float, emax. */ +#undef FLT_MAX_EXP +#undef DBL_MAX_EXP +#undef LDBL_MAX_EXP +#define FLT_MAX_EXP __FLT_MAX_EXP__ +#define DBL_MAX_EXP __DBL_MAX_EXP__ +#define LDBL_MAX_EXP __LDBL_MAX_EXP__ + +/* Maximum integer such that 10 raised to that power is in the range of + representable finite floating-point numbers, + + floor(log10((1 - b**-p) * b**emax)) +*/ +#undef FLT_MAX_10_EXP +#undef DBL_MAX_10_EXP +#undef LDBL_MAX_10_EXP +#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__ +#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__ +#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__ + +/* Maximum representable finite floating-point number, + + (1 - b**-p) * b**emax +*/ +#undef FLT_MAX +#undef DBL_MAX +#undef LDBL_MAX +#define FLT_MAX __FLT_MAX__ +#define DBL_MAX __DBL_MAX__ +#define LDBL_MAX __LDBL_MAX__ + +/* The difference between 1 and the least value greater than 1 that is + representable in the given floating point type, b**1-p. */ +#undef FLT_EPSILON +#undef DBL_EPSILON +#undef LDBL_EPSILON +#define FLT_EPSILON __FLT_EPSILON__ +#define DBL_EPSILON __DBL_EPSILON__ +#define LDBL_EPSILON __LDBL_EPSILON__ + +/* Minimum normalized positive floating-point number, b**(emin - 1). 
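+   For IEEE single precision (b = 2, emin = -125) this is 2**-126,
+   approximately 1.17549435e-38.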
*/ +#undef FLT_MIN +#undef DBL_MIN +#undef LDBL_MIN +#define FLT_MIN __FLT_MIN__ +#define DBL_MIN __DBL_MIN__ +#define LDBL_MIN __LDBL_MIN__ + +/* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown. */ +/* APPLE LOCAL begin 3399553 */ +/* This changes with calls to fesetround in . */ +#undef FLT_ROUNDS +#define FLT_ROUNDS (__builtin_flt_rounds ()) +/* APPLE LOCAL end 3399553 */ + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +/* The floating-point expression evaluation method. + -1 indeterminate + 0 evaluate all operations and constants just to the range and + precision of the type + 1 evaluate operations and constants of type float and double + to the range and precision of the double type, evaluate + long double operations and constants to the range and + precision of the long double type + 2 evaluate all operations and constants to the range and + precision of the long double type + + ??? This ought to change with the setting of the fp control word; + the value provided by the compiler assumes the widest setting. */ +#undef FLT_EVAL_METHOD +#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__ + +/* Number of decimal digits, n, such that any floating-point number in the + widest supported floating type with pmax radix b digits can be rounded + to a floating-point number with n decimal digits and back again without + change to the value, + + pmax * log10(b) if b is a power of 10 + ceil(1 + pmax * log10(b)) otherwise +*/ +#undef DECIMAL_DIG +#define DECIMAL_DIG __DECIMAL_DIG__ + +#endif /* C99 */ +#endif /* _FLOAT_H___ */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/iso646.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/iso646.h new file mode 100644 index 0000000..445d372 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/iso646.h @@ -0,0 +1,48 @@ +/* Copyright (C) 1997, 1999 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 7.9 Alternative spellings + */ + +#ifndef _ISO646_H +#define _ISO646_H + +#ifndef __cplusplus +#define and && +#define and_eq &= +#define bitand & +#define bitor | +#define compl ~ +#define not ! 
+#define not_eq != +#define or || +#define or_eq |= +#define xor ^ +#define xor_eq ^= +#endif + +#endif diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/limits.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/limits.h new file mode 100644 index 0000000..16417a2 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/limits.h @@ -0,0 +1,118 @@ +/* This administrivia gets added to the beginning of limits.h + if the system has its own version of limits.h. */ + +/* APPLE LOCAL begin 4401222 */ +#ifndef _LIBC_LIMITS_H_ +/* Use "..." so that we find syslimits.h only in this same directory. */ +#include "syslimits.h" +#endif +#ifdef _GCC_NEXT_LIMITS_H +#include_next +#undef _GCC_NEXT_LIMITS_H +#endif +/* APPLE LOCAL end 4401222 */ +#ifndef _LIMITS_H___ +#define _LIMITS_H___ + +/* Number of bits in a `char'. */ +#undef CHAR_BIT +#define CHAR_BIT __CHAR_BIT__ + +/* Maximum length of a multibyte character. */ +#ifndef MB_LEN_MAX +#define MB_LEN_MAX 1 +#endif + +/* Minimum and maximum values a `signed char' can hold. */ +#undef SCHAR_MIN +#define SCHAR_MIN (-SCHAR_MAX - 1) +#undef SCHAR_MAX +#define SCHAR_MAX __SCHAR_MAX__ + +/* Maximum value an `unsigned char' can hold. (Minimum is 0). */ +#undef UCHAR_MAX +#if __SCHAR_MAX__ == __INT_MAX__ +# define UCHAR_MAX (SCHAR_MAX * 2U + 1U) +#else +# define UCHAR_MAX (SCHAR_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `char' can hold. */ +#ifdef __CHAR_UNSIGNED__ +# undef CHAR_MIN +# if __SCHAR_MAX__ == __INT_MAX__ +# define CHAR_MIN 0U +# else +# define CHAR_MIN 0 +# endif +# undef CHAR_MAX +# define CHAR_MAX UCHAR_MAX +#else +# undef CHAR_MIN +# define CHAR_MIN SCHAR_MIN +# undef CHAR_MAX +# define CHAR_MAX SCHAR_MAX +#endif + +/* Minimum and maximum values a `signed short int' can hold. */ +#undef SHRT_MIN +#define SHRT_MIN (-SHRT_MAX - 1) +#undef SHRT_MAX +#define SHRT_MAX __SHRT_MAX__ + +/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */ +#undef USHRT_MAX +#if __SHRT_MAX__ == __INT_MAX__ +# define USHRT_MAX (SHRT_MAX * 2U + 1U) +#else +# define USHRT_MAX (SHRT_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `signed int' can hold. */ +#undef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#undef INT_MAX +#define INT_MAX __INT_MAX__ + +/* Maximum value an `unsigned int' can hold. (Minimum is 0). */ +#undef UINT_MAX +#define UINT_MAX (INT_MAX * 2U + 1U) + +/* Minimum and maximum values a `signed long int' can hold. + (Same as `int'). */ +#undef LONG_MIN +#define LONG_MIN (-LONG_MAX - 1L) +#undef LONG_MAX +#define LONG_MAX __LONG_MAX__ + +/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */ +#undef ULONG_MAX +#define ULONG_MAX (LONG_MAX * 2UL + 1UL) + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LLONG_MIN +# define LLONG_MIN (-LLONG_MAX - 1LL) +# undef LLONG_MAX +# define LLONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +# undef ULLONG_MAX +# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) +#endif + +#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__) +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LONG_LONG_MIN +# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL) +# undef LONG_LONG_MAX +# define LONG_LONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). 
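+   With the 64-bit long long used on this target that value is
+   18446744073709551615 (2**64 - 1).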
*/ +# undef ULONG_LONG_MAX +# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL) +#endif + +#endif /* _LIMITS_H___ */ +/* APPLE LOCAL begin 4401222 */ +/* APPLE LOCAL end 4401222 */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/mm3dnow.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/mm3dnow.h new file mode 100644 index 0000000..7fdc6dc --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/mm3dnow.h @@ -0,0 +1,220 @@ +/* Copyright (C) 2004 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the mm3dnow.h (of supposedly AMD origin) included with + MSVC 7.1. */ + +#ifndef _MM3DNOW_H_INCLUDED +#define _MM3DNOW_H_INCLUDED + +#ifdef __3dNOW__ + +#include + +/* Internal data types for implementing the intrinsics. 
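+   __v2sf is a pair of single-precision floats packed into one 64-bit MMX
+   register.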
*/ +typedef float __v2sf __attribute__ ((__vector_size__ (8))); + +static __inline void +_m_femms (void) +{ + __builtin_ia32_femms(); +} + +static __inline __m64 +_m_pavgusb (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pavgusb ((__v8qi)__A, (__v8qi)__B); +} + +static __inline __m64 +_m_pf2id (__m64 __A) +{ + return (__m64)__builtin_ia32_pf2id ((__v2sf)__A); +} + +static __inline __m64 +_m_pfacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfacc ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfadd (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfadd ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfcmpeq (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpeq ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfcmpge (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpge ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfcmpgt (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpgt ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfmax (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmax ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfmin (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmin ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfmul (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmul ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfrcp (__m64 __A) +{ + return (__m64)__builtin_ia32_pfrcp ((__v2sf)__A); +} + +static __inline __m64 +_m_pfrcpit1 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrcpit1 ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfrcpit2 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrcpit2 ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfrsqrt (__m64 __A) +{ + return (__m64)__builtin_ia32_pfrsqrt ((__v2sf)__A); +} + +static __inline __m64 +_m_pfrsqit1 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrsqit1 ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfsub (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfsub ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfsubr (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfsubr ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pi2fd (__m64 __A) +{ + return (__m64)__builtin_ia32_pi2fd ((__v2si)__A); +} + +static __inline __m64 +_m_pmulhrw (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pmulhrw ((__v4hi)__A, (__v4hi)__B); +} + +static __inline void +_m_prefetch (void *__P) +{ + __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */); +} + +static __inline void +_m_prefetchw (void *__P) +{ + __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */); +} + +static __inline __m64 +_m_from_float (float __A) +{ + return (__m64)(__v2sf){ __A, 0 }; +} + +static __inline float +_m_to_float (__m64 __A) +{ + union { __v2sf v; float a[2]; } __tmp = { (__v2sf)__A }; + return __tmp.a[0]; +} + +#ifdef __3dNOW_A__ + +static __inline __m64 +_m_pf2iw (__m64 __A) +{ + return (__m64)__builtin_ia32_pf2iw ((__v2sf)__A); +} + +static __inline __m64 +_m_pfnacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfnacc ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfpnacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfpnacc ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pi2fw (__m64 __A) +{ + return (__m64)__builtin_ia32_pi2fw ((__v2si)__A); +} + +static __inline __m64 +_m_pswapd (__m64 __A) +{ + return 
(__m64)__builtin_ia32_pswapdsf ((__v2sf)__A); +} + +#endif /* __3dNOW_A__ */ +#endif /* __3dNOW__ */ + +#endif /* _MM3DNOW_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/mm_malloc.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/mm_malloc.h new file mode 100644 index 0000000..20d7f5e --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/mm_malloc.h @@ -0,0 +1,77 @@ +/* Copyright (C) 2004 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +#ifndef _MM_MALLOC_H_INCLUDED +#define _MM_MALLOC_H_INCLUDED + +#include +#include + +static __inline__ void* +_mm_malloc (size_t size, size_t align) +{ + void * malloc_ptr; + void * aligned_ptr; + + /* Error if align is not a power of two. */ + if (align & (align - 1)) + { + errno = EINVAL; + return ((void*) 0); + } + + if (size == 0) + return ((void *) 0); + + /* Assume malloc'd pointer is aligned at least to sizeof (void*). + If necessary, add another sizeof (void*) to store the value + returned by malloc. Effectively this enforces a minimum alignment + of sizeof double. */ + if (align < 2 * sizeof (void *)) + align = 2 * sizeof (void *); + + malloc_ptr = malloc (size + align); + if (!malloc_ptr) + return ((void *) 0); + + /* Align We have at least sizeof (void *) space below malloc'd ptr. */ + aligned_ptr = (void *) (((size_t) malloc_ptr + align) + & ~((size_t) (align) - 1)); + + /* Store the original pointer just before p. */ + ((void **) aligned_ptr) [-1] = malloc_ptr; + + return aligned_ptr; +} + +static __inline__ void +_mm_free (void * aligned_ptr) +{ + if (aligned_ptr) + free (((void **) aligned_ptr) [-1]); +} + +#endif /* _MM_MALLOC_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/mmintrin.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/mmintrin.h new file mode 100644 index 0000000..64db058 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/mmintrin.h @@ -0,0 +1,1219 @@ +/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */ +/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _MMINTRIN_H_INCLUDED +#define _MMINTRIN_H_INCLUDED + +#ifndef __MMX__ +# error "MMX instruction set not enabled" +#else +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +/* APPLE LOCAL 4505813 */ +typedef long long __m64 __attribute__ ((__vector_size__ (8), __may_alias__)); + +/* Internal data types for implementing the intrinsics. */ +typedef int __v2si __attribute__ ((__vector_size__ (8))); +typedef short __v4hi __attribute__ ((__vector_size__ (8))); +typedef char __v8qi __attribute__ ((__vector_size__ (8))); + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* Empty the multimedia state. */ +/* APPLE LOCAL begin radar 4152603 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_empty (void) +{ + __builtin_ia32_emms (); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_empty (void) +{ + _mm_empty (); +} + +/* Convert I to a __m64 object. The integer is zero-extended to 64-bits. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi32_si64 (int __i) +{ + return (__m64) __builtin_ia32_vec_init_v2si (__i, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_from_int (int __i) +{ + return _mm_cvtsi32_si64 (__i); +} + +#ifdef __x86_64__ +/* Convert I to a __m64 object. */ + +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_from_int64 (long long __i) +{ + return (__m64) __i; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_m64 (long long __i) +{ + return (__m64) __i; +} + +/* Microsoft intrinsic. 
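+   Performs the same conversion as _mm_cvtsi64_m64 above, under the name used
+   by Microsoft's compiler.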
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64x_si64 (long long __i) +{ + return (__m64) __i; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_pi64x (long long __i) +{ + return (__m64) __i; +} +#endif + +/* Convert the lower 32 bits of the __m64 object into an integer. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_si32 (__m64 __i) +{ + return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_to_int (__m64 __i) +{ + return _mm_cvtsi64_si32 (__i); +} + +#ifdef __x86_64__ +/* Convert the __m64 object to a 64bit integer. */ + +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_to_int64 (__m64 __i) +{ + return (long long)__i; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtm64_si64 (__m64 __i) +{ + return (long long)__i; +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_si64x (__m64 __i) +{ + return (long long)__i; +} +#endif + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with signed saturation. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packs_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_packsswb (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pi16 (__m1, __m2); +} + +/* Pack the two 32-bit values from M1 in to the lower two 16-bit values of + the result, and the two 32-bit values from M2 into the upper two 16-bit + values of the result, all with signed saturation. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packs_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_packssdw (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pi32 (__m1, __m2); +} + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with unsigned saturation. 
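+   For example, a source element of 300 saturates to 255 and a negative source
+   element saturates to 0.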
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packs_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_packuswb (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pu16 (__m1, __m2); +} + +/* Interleave the four 8-bit values from the high half of M1 with the four + 8-bit values from the high half of M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpckhbw (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi8 (__m1, __m2); +} + +/* Interleave the two 16-bit values from the high half of M1 with the two + 16-bit values from the high half of M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpckhwd (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi16 (__m1, __m2); +} + +/* Interleave the 32-bit value from the high half of M1 with the 32-bit + value from the high half of M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpckhdq (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi32 (__m1, __m2); +} + +/* Interleave the four 8-bit values from the low half of M1 with the four + 8-bit values from the low half of M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpcklbw (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi8 (__m1, __m2); +} + +/* Interleave the two 16-bit values from the low half of M1 with the two + 16-bit values from the low half of M2. 
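+   For example, {a0,a1,a2,a3} and {b0,b1,b2,b3} produce {a0,b0,a1,b1}.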
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpcklwd (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi16 (__m1, __m2); +} + +/* Interleave the 32-bit value from the low half of M1 with the 32-bit + value from the low half of M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpckldq (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi32 (__m1, __m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddb (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddw (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi16 (__m1, __m2); +} + +/* Add the 32-bit values in M1 to the 32-bit values in M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddd (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi32 (__m1, __m2); +} + +/* Add the 64-bit values in M1 to the 64-bit values in M2. */ +#ifdef __SSE2__ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_si64 (__m64 __m1, __m64 __m2) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_paddq (__m1, __m2); +} +#endif + +/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed + saturated arithmetic. 
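+   For example, 100 + 100 saturates to 127 instead of wrapping around to -56.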
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddsb (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pi8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed + saturated arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddsw (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pi16 (__m1, __m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned + saturated arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_pu8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddusb (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pu8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned + saturated arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddusw (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pu16 (__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubb (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubw (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi16 (__m1, __m2); +} + +/* Subtract the 32-bit values in M2 from the 32-bit values in M1. 
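+   Each 32-bit element is subtracted independently; overflow wraps modulo 2**32.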
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubd (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi32 (__m1, __m2); +} + +/* Add the 64-bit values in M1 to the 64-bit values in M2. */ +#ifdef __SSE2__ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_si64 (__m64 __m1, __m64 __m2) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psubq (__m1, __m2); +} +#endif + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed + saturating arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubsb (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pi8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + signed saturating arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubsw (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pi16 (__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using + unsigned saturating arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_pu8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubusb (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pu8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + unsigned saturating arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubusw (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pu16 (__m1, __m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing + four 32-bit intermediate results, which are then summed by pairs to + produce two 32-bit results. 
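+   For example, {a0,a1,a2,a3} and {b0,b1,b2,b3} (16-bit elements) give
+   {a0*b0 + a1*b1, a2*b2 + a3*b3} as two 32-bit elements.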
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_madd_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmaddwd (__m64 __m1, __m64 __m2) +{ + return _mm_madd_pi16 (__m1, __m2); +} + +/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in + M2 and produce the high 16 bits of the 32-bit results. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhi_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmulhw (__m64 __m1, __m64 __m2) +{ + return _mm_mulhi_pi16 (__m1, __m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce + the low 16 bits of the results. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mullo_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmullw (__m64 __m1, __m64 __m2) +{ + return _mm_mullo_pi16 (__m1, __m2); +} + +/* Shift four 16-bit values in M left by COUNT. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sll_pi16 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psllw (__m64 __m, __m64 __count) +{ + return _mm_sll_pi16 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_pi16 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psllwi ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psllwi (__m64 __m, int __count) +{ + return _mm_slli_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M left by COUNT. 
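+   Shift counts greater than 31 clear each element to zero.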
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sll_pi32 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pslld (__m64 __m, __m64 __count) +{ + return _mm_sll_pi32 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_pi32 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_pslldi ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pslldi (__m64 __m, int __count) +{ + return _mm_slli_pi32 (__m, __count); +} + +/* Shift the 64-bit value in M left by COUNT. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sll_si64 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psllq (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psllq (__m64 __m, __m64 __count) +{ + return _mm_sll_si64 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_si64 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psllqi (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psllqi (__m64 __m, int __count) +{ + return _mm_slli_si64 (__m, __count); +} + +/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sra_pi16 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psraw (__m64 __m, __m64 __count) +{ + return _mm_sra_pi16 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srai_pi16 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrawi ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrawi (__m64 __m, int __count) +{ + return _mm_srai_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. 
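+   For example, an element holding -8 shifted right by 1 becomes -4; counts of
+   32 or more fill each element with copies of its sign bit.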
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sra_pi32 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrad (__m64 __m, __m64 __count) +{ + return _mm_sra_pi32 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srai_pi32 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psradi ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psradi (__m64 __m, int __count) +{ + return _mm_srai_pi32 (__m, __count); +} + +/* Shift four 16-bit values in M right by COUNT; shift in zeros. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_pi16 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrlw (__m64 __m, __m64 __count) +{ + return _mm_srl_pi16 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_pi16 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrlwi ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrlwi (__m64 __m, int __count) +{ + return _mm_srli_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M right by COUNT; shift in zeros. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_pi32 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrld (__m64 __m, __m64 __count) +{ + return _mm_srl_pi32 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_pi32 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrldi ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrldi (__m64 __m, int __count) +{ + return _mm_srli_pi32 (__m, __count); +} + +/* Shift the 64-bit value in M left by COUNT; shift in zeros. 
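+   (The bits move toward the low-order end, i.e. a logical right shift, with
+   zeros filling the vacated high-order bits.)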
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_si64 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrlq (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrlq (__m64 __m, __m64 __count) +{ + return _mm_srl_si64 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_si64 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrlqi (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrlqi (__m64 __m, int __count) +{ + return _mm_srli_si64 (__m, __count); +} + +/* Bit-wise AND the 64-bit values in M1 and M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_and_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pand (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pand (__m64 __m1, __m64 __m2) +{ + return _mm_and_si64 (__m1, __m2); +} + +/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the + 64-bit value in M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_andnot_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pandn (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pandn (__m64 __m1, __m64 __m2) +{ + return _mm_andnot_si64 (__m1, __m2); +} + +/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_or_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_por (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_por (__m64 __m1, __m64 __m2) +{ + return _mm_or_si64 (__m1, __m2); +} + +/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_xor_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pxor (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pxor (__m64 __m1, __m64 __m2) +{ + return _mm_xor_si64 (__m1, __m2); +} + +/* Compare eight 8-bit values. The result of the comparison is 0xFF if the + test is true and zero if false. 
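   For example (a sketch; _mm_set_pi8 and _mm_set1_pi8 are defined further down
   in this header):

     __m64 x  = _mm_set_pi8 (0, 1, 2, 3, 4, 5, 6, 7);
     __m64 y  = _mm_set1_pi8 (3);
     __m64 eq = _mm_cmpeq_pi8 (x, y);   // 0xFF only in the lane holding 3
     __m64 gt = _mm_cmpgt_pi8 (x, y);   // 0xFF in the lanes holding 4 through 7 (signed compare)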
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpeqb (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi8 (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpgtb (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi8 (__m1, __m2); +} + +/* Compare four 16-bit values. The result of the comparison is 0xFFFF if + the test is true and zero if false. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpeqw (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi16 (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpgtw (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi16 (__m1, __m2); +} + +/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if + the test is true and zero if false. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpeqd (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi32 (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpgtd (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi32 (__m1, __m2); +} + +/* Creates a 64-bit zero. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setzero_si64 (void) +{ + return (__m64)0LL; +} + +/* Creates a vector of two 32-bit values; I0 is least significant. 
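   For example (illustrative only):

     __m64 a = _mm_set_pi32 (1, 2);    // element 1 = 1, element 0 = 2
     __m64 b = _mm_setr_pi32 (2, 1);   // same layout; _mm_setr_* takes arguments low-to-high
     int  lo = _mm_cvtsi64_si32 (a);   // reads back 2, the least-significant element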
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_pi32 (int __i1, int __i0) +{ + return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1); +} + +/* Creates a vector of four 16-bit values; W0 is least significant. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0) +{ + return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3); +} + +/* Creates a vector of eight 8-bit values; B0 is least significant. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4, + char __b3, char __b2, char __b1, char __b0) +{ + return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3, + __b4, __b5, __b6, __b7); +} + +/* Similar, but with the arguments in reverse order. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_pi32 (int __i0, int __i1) +{ + return _mm_set_pi32 (__i1, __i0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3) +{ + return _mm_set_pi16 (__w3, __w2, __w1, __w0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3, + char __b4, char __b5, char __b6, char __b7) +{ + return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); +} + +/* Creates a vector of two 32-bit values, both elements containing I. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_pi32 (int __i) +{ + return _mm_set_pi32 (__i, __i); +} + +/* Creates a vector of four 16-bit values, all elements containing W. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_pi16 (short __w) +{ + return _mm_set_pi16 (__w, __w, __w, __w); +} + +/* Creates a vector of eight 8-bit values, all elements containing B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_pi8 (char __b) +{ + return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b); +} +/* APPLE LOCAL end radar 4152603 */ + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +#endif /* __MMX__ */ +#endif /* _MMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/nmmintrin.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/nmmintrin.h new file mode 100644 index 0000000..5c0db20 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/nmmintrin.h @@ -0,0 +1,41 @@ +/* APPLE LOCAL file 5612787 mainline sse4 */ +/* Copyright (C) 2007 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 10.0. */ + +#ifndef _NMMINTRIN_H_INCLUDED +#define _NMMINTRIN_H_INCLUDED + +#ifndef __SSE4_2__ +# error "SSE4.2 instruction set not enabled" +#else +/* We just include SSE4.1 header file. */ +#include <smmintrin.h> +#endif /* __SSE4_2__ */ + +#endif /* _NMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/omp.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/omp.h new file mode 100644 index 0000000..1400282 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/omp.h @@ -0,0 +1,87 @@ +/* Copyright (C) 2005 Free Software Foundation, Inc. + Contributed by Richard Henderson <rth@redhat.com>. + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for + more details. + + You should have received a copy of the GNU Lesser General Public License + along with libgomp; see the file COPYING.LIB. If not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301, USA. */ + +/* As a special exception, if you link this library with other files, some + of which are compiled with GCC, to produce an executable, this library + does not by itself cause the resulting executable to be covered by the + GNU General Public License. This exception does not however invalidate + any other reasons why the executable file might be covered by the GNU + General Public License. */ + +#ifndef OMP_H +#define OMP_H 1 + +#ifndef _LIBGOMP_OMP_LOCK_DEFINED +#define _LIBGOMP_OMP_LOCK_DEFINED 1 +/* These two structures get edited by the libgomp build process to + reflect the shape of the two types. Their internals are private + to the library.
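   A typical use of the plain lock type looks like this (a sketch only; the
   functions are declared just below):

     omp_lock_t l;
     omp_init_lock (&l);       // a lock must be initialized before first use
     omp_set_lock (&l);        // blocks until the lock is acquired
     // ... code that must not run concurrently ...
     omp_unset_lock (&l);
     omp_destroy_lock (&l);    // release the lock's resources when done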
*/ + +typedef struct +{ + unsigned char _x[64] + __attribute__((__aligned__(8))); +} omp_lock_t; + +typedef struct +{ + unsigned char _x[72] + __attribute__((__aligned__(8))); +} omp_nest_lock_t; +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +extern void omp_set_num_threads (int); +extern int omp_get_num_threads (void); +extern int omp_get_max_threads (void); +extern int omp_get_thread_num (void); +extern int omp_get_num_procs (void); + +extern int omp_in_parallel (void); + +extern void omp_set_dynamic (int); +extern int omp_get_dynamic (void); + +extern void omp_set_nested (int); +extern int omp_get_nested (void); + +extern void omp_init_lock (omp_lock_t *); +extern void omp_destroy_lock (omp_lock_t *); +extern void omp_set_lock (omp_lock_t *); +extern void omp_unset_lock (omp_lock_t *); +extern int omp_test_lock (omp_lock_t *); + +extern void omp_init_nest_lock (omp_nest_lock_t *); +extern void omp_destroy_nest_lock (omp_nest_lock_t *); +extern void omp_set_nest_lock (omp_nest_lock_t *); +extern void omp_unset_nest_lock (omp_nest_lock_t *); +extern int omp_test_nest_lock (omp_nest_lock_t *); + +extern double omp_get_wtime (void); +extern double omp_get_wtick (void); + +#ifdef __cplusplus +} +#endif + +#endif /* OMP_H */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/pmmintrin.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/pmmintrin.h new file mode 100644 index 0000000..7640941 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/pmmintrin.h @@ -0,0 +1,172 @@ +/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */ +/* Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _PMMINTRIN_H_INCLUDED +#define _PMMINTRIN_H_INCLUDED + +#ifdef __SSE3__ +#include <xmmintrin.h> +#include <emmintrin.h> + +/* Additional bits in the MXCSR.
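   For example, the macros defined just below can be used to make SSE arithmetic
   treat denormal inputs as zero (a sketch; note this changes the MXCSR, i.e.
   global floating-point state for the calling thread):

     _MM_SET_DENORMALS_ZERO_MODE (_MM_DENORMALS_ZERO_ON);
     // _MM_GET_DENORMALS_ZERO_MODE () now yields _MM_DENORMALS_ZERO_ON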
*/ +#define _MM_DENORMALS_ZERO_MASK 0x0040 +#define _MM_DENORMALS_ZERO_ON 0x0040 +#define _MM_DENORMALS_ZERO_OFF 0x0000 + +#define _MM_SET_DENORMALS_ZERO_MODE(mode) \ + _mm_setcsr ((_mm_getcsr () & ~_MM_DENORMALS_ZERO_MASK) | (mode)) +#define _MM_GET_DENORMALS_ZERO_MODE() \ + (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK) + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* APPLE LOCAL begin radar 4152603 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_addsub_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_addsubps ((__v4sf)__X, (__v4sf)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_haddps ((__v4sf)__X, (__v4sf)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_hsubps ((__v4sf)__X, (__v4sf)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movehdup_ps (__m128 __X) +{ + return (__m128) __builtin_ia32_movshdup ((__v4sf)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_moveldup_ps (__m128 __X) +{ + return (__m128) __builtin_ia32_movsldup ((__v4sf)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_addsub_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_addsubpd ((__v2df)__X, (__v2df)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_haddpd ((__v2df)__X, (__v2df)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_hsubpd ((__v2df)__X, (__v2df)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loaddup_pd (double const *__P) +{ + return _mm_load1_pd (__P); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movedup_pd (__m128d __X) +{ + return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_lddqu_si128 (__m128i const *__P) +{ + return (__m128i) __builtin_ia32_lddqu ((char const *)__P); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_monitor (void const * __P, 
unsigned int __E, unsigned int __H) +{ + __builtin_ia32_monitor (__P, __E, __H); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mwait (unsigned int __E, unsigned int __H) +{ + __builtin_ia32_mwait (__E, __H); +} +/* APPLE LOCAL end radar 4152603 */ +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +#endif /* __SSE3__ */ + +#endif /* _PMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/ppc_intrinsics.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/ppc_intrinsics.h new file mode 120000 index 0000000..9383ee4 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/ppc_intrinsics.h @@ -0,0 +1 @@ +../../../../../include/gcc/darwin/4.2/ppc_intrinsics.h \ No newline at end of file diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/smmintrin.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/smmintrin.h new file mode 100644 index 0000000..2da9a74 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/smmintrin.h @@ -0,0 +1,836 @@ +/* APPLE LOCAL file 5612787 mainline sse4 */ +/* Copyright (C) 2007 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 10.0. */ + +#ifndef _SMMINTRIN_H_INCLUDED +#define _SMMINTRIN_H_INCLUDED + +#ifndef __SSE4_1__ +# error "SSE4.1 instruction set not enabled" +#else + +/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header + files. */ +#include <tmmintrin.h> + +/* SSE4.1 */ + +/* Rounding mode macros.
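   For example (a sketch; _mm_round_ps is defined later in this header and
   _mm_set_ps is available via the headers included above):

     __m128 v = _mm_set_ps (1.5f, -1.5f, 2.7f, -2.7f);
     __m128 f = _mm_round_ps (v, _MM_FROUND_FLOOR);      // per element (0..3): -3.0, 2.0, -2.0, 1.0
     __m128 n = _mm_round_ps (v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);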
*/ +#define _MM_FROUND_TO_NEAREST_INT 0x00 +#define _MM_FROUND_TO_NEG_INF 0x01 +#define _MM_FROUND_TO_POS_INF 0x02 +#define _MM_FROUND_TO_ZERO 0x03 +#define _MM_FROUND_CUR_DIRECTION 0x04 + +#define _MM_FROUND_RAISE_EXC 0x00 +#define _MM_FROUND_NO_EXC 0x08 + +#define _MM_FROUND_NINT \ + (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_FLOOR \ + (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_CEIL \ + (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_TRUNC \ + (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_RINT \ + (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_NEARBYINT \ + (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC) + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* Integer blend instructions - select data from 2 sources using + constant/variable mask. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X, + (__v8hi)__Y, + __M); +} +#else +#define _mm_blend_epi16(X, Y, M) \ + ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(X), (__v8hi)(Y), (M))) +#endif + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M) +{ + return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X, + (__v16qi)__Y, + (__v16qi)__M); +} + +/* Single precision floating point blend instructions - select data + from 2 sources using constant/variable mask. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_blend_ps (__m128 __X, __m128 __Y, const int __M) +{ + return (__m128) __builtin_ia32_blendps ((__v4sf)__X, + (__v4sf)__Y, + __M); +} +#else +#define _mm_blend_ps(X, Y, M) \ + ((__m128) __builtin_ia32_blendps ((__v4sf)(X), (__v4sf)(Y), (M))) +#endif + +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M) +{ + return (__m128) __builtin_ia32_blendvps ((__v4sf)__X, + (__v4sf)__Y, + (__v4sf)__M); +} + +/* Double precision floating point blend instructions - select data + from 2 sources using constant/variable mask. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +_mm_blend_pd (__m128d __X, __m128d __Y, const int __M) +{ + return (__m128d) __builtin_ia32_blendpd ((__v2df)__X, + (__v2df)__Y, + __M); +} +#else +#define _mm_blend_pd(X, Y, M) \ + ((__m128d) __builtin_ia32_blendpd ((__v2df)(X), (__v2df)(Y), (M))) +#endif + +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +_mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M) +{ + return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X, + (__v2df)__Y, + (__v2df)__M); +} + +/* Dot product instructions with mask-defined summing and zeroing parts + of result. 
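   For example (illustrative): with mask 0xF1 every product contributes to the
   sum and only element 0 of the result receives it:

     __m128 a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
     __m128 b = _mm_set1_ps (1.0f);
     __m128 d = _mm_dp_ps (a, b, 0xF1);   // element 0 = 10.0, all other elements = 0.0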
*/ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_dp_ps (__m128 __X, __m128 __Y, const int __M) +{ + return (__m128) __builtin_ia32_dpps ((__v4sf)__X, + (__v4sf)__Y, + __M); +} + +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +_mm_dp_pd (__m128d __X, __m128d __Y, const int __M) +{ + return (__m128d) __builtin_ia32_dppd ((__v2df)__X, + (__v2df)__Y, + __M); +} +#else +#define _mm_dp_ps(X, Y, M) \ + ((__m128) __builtin_ia32_dpps ((__v4sf)(X), (__v4sf)(Y), (M))) + +#define _mm_dp_pd(X, Y, M) \ + ((__m128d) __builtin_ia32_dppd ((__v2df)(X), (__v2df)(Y), (M))) +#endif + +/* Packed integer 64-bit comparison, zeroing or filling with ones + corresponding parts of result. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cmpeq_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pcmpeqq ((__v2di)__X, (__v2di)__Y); +} + +/* Min/max packed integer instructions. */ + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_min_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_max_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_min_epu16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_max_epu16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_min_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_max_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_min_epu32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_max_epu32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y); +} + +/* Packed integer 32-bit multiplication with truncation of upper + halves of results. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_mullo_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X, (__v4si)__Y); +} + +/* Packed integer 32-bit multiplication of 2 pairs of operands + with two 64-bit results. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_mul_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y); +} + +/* Packed integer 128-bit bitwise comparison. Return 1 if + (__V & __M) == 0. */ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_testz_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V); +} + +/* Packed integer 128-bit bitwise comparison. Return 1 if + (__V & ~__M) == 0. */ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_testc_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestc128 ((__v2di)__M, (__v2di)__V); +} + +/* Packed integer 128-bit bitwise comparison. 
Return 1 if + (__V & __M) != 0 && (__V & ~__M) != 0. */ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_testnzc_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestnzc128 ((__v2di)__M, (__v2di)__V); +} + +/* Macros for packed integer 128-bit comparison intrinsics. */ +#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V)) + +#define _mm_test_all_ones(V) \ + _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V))) + +#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V)) + +/* Insert single precision float into packed single precision array + element selected by index N. The bits [7-6] of N define S + index, the bits [5-4] define D index, and bits [3-0] define + zeroing mask for D. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_insert_ps (__m128 __D, __m128 __S, const int __N) +{ + return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D, + (__v4sf)__S, + __N); +} +#else +#define _mm_insert_ps(D, S, N) \ + ((__m128) __builtin_ia32_insertps128 ((__v4sf)(D), (__v4sf)(S), (N))) +#endif + +/* Helper macro to create the N value for _mm_insert_ps. */ +#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M)) + +/* Extract binary representation of single precision float from packed + single precision array element of X selected by index N. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_extract_ps (__m128 __X, const int __N) +{ + union { int i; float f; } __tmp; + __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N); + return __tmp.i; +} +#else +#define _mm_extract_ps(X, N) \ + (__extension__ \ + ({ \ + union { int i; float f; } __tmp; \ + __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(X), (N)); \ + __tmp.i; \ + }) \ + ) +#endif + +/* Extract binary representation of single precision float into + D from packed single precision array element of S selected + by index N. */ +#define _MM_EXTRACT_FLOAT(D, S, N) \ + { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); } + +/* Extract specified single precision float element into the lower + part of __m128. */ +#define _MM_PICK_OUT_PS(X, N) \ + _mm_insert_ps (_mm_setzero_ps (), (X), \ + _MM_MK_INSERTPS_NDX ((N), 0, 0x0e)) + +/* Insert integer, S, into packed integer array element of D + selected by index N. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_insert_epi8 (__m128i __D, int __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D, + __S, __N); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_insert_epi32 (__m128i __D, int __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D, + __S, __N); +} + +#ifdef __x86_64__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_insert_epi64 (__m128i __D, long long __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D, + __S, __N); +} +#endif +#else +#define _mm_insert_epi8(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(D), (S), (N))) + +#define _mm_insert_epi32(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(D), (S), (N))) + +#ifdef __x86_64__ +#define _mm_insert_epi64(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(D), (S), (N))) +#endif +#endif + +/* Extract integer from packed integer array element of X selected by + index N. 
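   For example (a sketch; the element selector must be a compile-time constant,
   and _mm_set_epi32 comes from <emmintrin.h>):

     __m128i v  = _mm_set_epi32 (40, 30, 20, 10);
     int     e0 = _mm_extract_epi32 (v, 0);   // 10, the least-significant element
     int     e3 = _mm_extract_epi32 (v, 3);   // 40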
*/ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_extract_epi8 (__m128i __X, const int __N) +{ + return __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_extract_epi32 (__m128i __X, const int __N) +{ + return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N); +} + +#ifdef __x86_64__ +__STATIC_INLINE long long __attribute__((__always_inline__)) +_mm_extract_epi64 (__m128i __X, const int __N) +{ + return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N); +} +#endif +#else +#define _mm_extract_epi8(X, N) \ + __builtin_ia32_vec_ext_v16qi ((__v16qi) X, (N)) +#define _mm_extract_epi32(X, N) \ + __builtin_ia32_vec_ext_v4si ((__v4si) X, (N)) + +#ifdef __x86_64__ +#define _mm_extract_epi64(X, N) \ + ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(X), (N))) +#endif +#endif + +/* Return horizontal packed word minimum and its index in bits [15:0] + and bits [18:16] respectively. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_minpos_epu16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X); +} + +/* Packed/scalar double precision floating point rounding. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +_mm_round_pd (__m128d __V, const int __M) +{ + return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M); +} + +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +_mm_round_sd(__m128d __D, __m128d __V, const int __M) +{ + return (__m128d) __builtin_ia32_roundsd ((__v2df)__D, + (__v2df)__V, + __M); +} +#else +#define _mm_round_pd(V, M) \ + ((__m128d) __builtin_ia32_roundpd ((__v2df)(V), (M))) + +#define _mm_round_sd(D, V, M) \ + ((__m128d) __builtin_ia32_roundsd ((__v2df)(D), (__v2df)(V), (M))) +#endif + +/* Packed/scalar single precision floating point rounding. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_round_ps (__m128 __V, const int __M) +{ + return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M); +} + +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_round_ss (__m128 __D, __m128 __V, const int __M) +{ + return (__m128) __builtin_ia32_roundss ((__v4sf)__D, + (__v4sf)__V, + __M); +} +#else +#define _mm_round_ps(V, M) \ + ((__m128) __builtin_ia32_roundps ((__v4sf)(V), (M))) + +#define _mm_round_ss(D, V, M) \ + ((__m128) __builtin_ia32_roundss ((__v4sf)(D), (__v4sf)(V), (M))) +#endif + +/* Macros for ceil/floor intrinsics. */ +#define _mm_ceil_pd(V) _mm_round_pd ((V), _MM_FROUND_CEIL) +#define _mm_ceil_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_CEIL) + +#define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR) +#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR) + +#define _mm_ceil_ps(V) _mm_round_ps ((V), _MM_FROUND_CEIL) +#define _mm_ceil_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_CEIL) + +#define _mm_floor_ps(V) _mm_round_ps ((V), _MM_FROUND_FLOOR) +#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR) + +/* Packed integer sign-extension. 
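   For example (illustrative; _mm_setr_epi8 comes from <emmintrin.h>), the four
   low bytes of a vector can be widened to 32-bit elements with their signs
   preserved:

     __m128i b = _mm_setr_epi8 (-1, 2, -3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
     __m128i w = _mm_cvtepi8_epi32 (b);   // 32-bit elements: -1, 2, -3, 4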
*/ + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi8_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi16_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi8_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi32_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi16_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi8_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X); +} + +/* Packed integer zero-extension. */ + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu8_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu16_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu8_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu32_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu16_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu8_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X); +} + +/* Pack 8 double words from 2 operands into 8 words of result with + unsigned saturation. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_packus_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y); +} + +/* Sum absolute 8-bit integer difference of adjacent groups of 4 + byte integers in the first 2 operands. Starting offsets within + operands are determined by the 3rd mask operand. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X, + (__v16qi)__Y, __M); +} +#else +#define _mm_mpsadbw_epu8(X, Y, M) \ + ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(X), (__v16qi)(Y), (M))) +#endif + +/* Load double quadword using non-temporal aligned hint. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_stream_load_si128 (__m128i *__X) +{ + return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X); +} + +#ifdef __SSE4_2__ + +/* These macros specify the source data format. */ +#define SIDD_UBYTE_OPS 0x00 +#define SIDD_UWORD_OPS 0x01 +#define SIDD_SBYTE_OPS 0x02 +#define SIDD_SWORD_OPS 0x03 + +/* These macros specify the comparison operation. */ +#define SIDD_CMP_EQUAL_ANY 0x00 +#define SIDD_CMP_RANGES 0x04 +#define SIDD_CMP_EQUAL_EACH 0x08 +#define SIDD_CMP_EQUAL_ORDERED 0x0c + +/* These macros specify the the polarity. 
*/ +#define SIDD_POSITIVE_POLARITY 0x00 +#define SIDD_NEGATIVE_POLARITY 0x10 +#define SIDD_MASKED_POSITIVE_POLARITY 0x20 +#define SIDD_MASKED_NEGATIVE_POLARITY 0x30 + +/* These macros specify the output selection in _mm_cmpXstri (). */ +#define SIDD_LEAST_SIGNIFICANT 0x00 +#define SIDD_MOST_SIGNIFICANT 0x40 + +/* These macros specify the output selection in _mm_cmpXstrm (). */ +#define SIDD_BIT_MASK 0x00 +#define SIDD_UNIT_MASK 0x40 + +/* Intrinsics for text/string processing. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistri (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistri128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} +#else +#define _mm_cmpistrm(X, Y, M) \ + ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(X), (__v16qi)(Y), (M))) +#define _mm_cmpistri(X, Y, M) \ + __builtin_ia32_pcmpistri128 ((__v16qi)(X), (__v16qi)(Y), (M)) + +#define _mm_cmpestrm(X, LX, Y, LY, M) \ + ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M))) +#define _mm_cmpestri(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestri128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#endif + +/* Intrinsics for text/string processing and reading values of + EFlags. 
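   For example, a find-first-match sketch (hedged: it assumes the usual
   pcmpistri convention where the returned index refers to the second operand,
   and the two load helpers come from <emmintrin.h>):

     __m128i set  = _mm_cvtsi32_si128 ('n');   // implicit-length "set": just the byte 'n'
     __m128i text = _mm_loadu_si128 ((const __m128i *) "find the needle!");
     int idx = _mm_cmpistri (set, text,
                             SIDD_UBYTE_OPS | SIDD_CMP_EQUAL_ANY | SIDD_LEAST_SIGNIFICANT);
     // idx is 2, the position of the first 'n' in text; 16 would mean no match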
*/ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistra (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistria128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistric128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistro (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistrio128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistris128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistriz128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} +#else +#define _mm_cmpistra(X, Y, M) \ + __builtin_ia32_pcmpistria128 ((__v16qi)(X), (__v16qi)(Y), (M)) +#define _mm_cmpistrc(X, Y, M) \ + __builtin_ia32_pcmpistric128 ((__v16qi)(X), (__v16qi)(Y), (M)) +#define _mm_cmpistro(X, Y, M) \ + __builtin_ia32_pcmpistrio128 ((__v16qi)(X), (__v16qi)(Y), (M)) +#define _mm_cmpistrs(X, Y, M) \ + __builtin_ia32_pcmpistris128 ((__v16qi)(X), (__v16qi)(Y), (M)) +#define _mm_cmpistrz(X, Y, M) \ + __builtin_ia32_pcmpistriz128 ((__v16qi)(X), (__v16qi)(Y), (M)) + +#define _mm_cmpestra(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestria128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#define _mm_cmpestrc(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestric128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#define _mm_cmpestro(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestrio128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#define _mm_cmpestrs(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestris128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#define _mm_cmpestrz(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestriz128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#endif + +/* Packed integer 64-bit comparison, zeroing or filling with ones + corresponding parts of result. 
*/ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cmpgt_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pcmpgtq ((__v2di)__X, (__v2di)__Y); +} + +/* Calculate a number of bits set to 1. */ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_popcnt_u32 (unsigned int __X) +{ + return __builtin_popcount (__X); +} + +#ifdef __x86_64__ +__STATIC_INLINE long long __attribute__((__always_inline__)) +_mm_popcnt_u64 (unsigned long long __X) +{ + return __builtin_popcountll (__X); +} +#endif + +/* Accumulate CRC32 (polynomial 0x11EDC6F41) value. */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +_mm_crc32_u8 (unsigned int __C, unsigned char __V) +{ + return __builtin_ia32_crc32qi (__C, __V); +} + +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +_mm_crc32_u16 (unsigned int __C, unsigned short __V) +{ + return __builtin_ia32_crc32hi (__C, __V); +} + +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +_mm_crc32_u32 (unsigned int __C, unsigned int __V) +{ + return __builtin_ia32_crc32si (__C, __V); +} + +#ifdef __x86_64__ +__STATIC_INLINE unsigned long long __attribute__((__always_inline__)) +_mm_crc32_u64 (unsigned long long __C, unsigned long long __V) +{ + return __builtin_ia32_crc32di (__C, __V); +} +#endif + +#endif /* __SSE4_2__ */ + +#endif /* __SSE4_1__ */ + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +#endif /* _SMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/stdarg.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/stdarg.h new file mode 100644 index 0000000..c9ddd6b --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/stdarg.h @@ -0,0 +1,133 @@ +/* Copyright (C) 1989, 1997, 1998, 1999, 2000 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 7.15 Variable arguments + */ + +#ifndef _STDARG_H +#ifndef _ANSI_STDARG_H_ +#ifndef __need___va_list +#define _STDARG_H +#define _ANSI_STDARG_H_ +#endif /* not __need___va_list */ +#undef __need___va_list + +/* Define __gnuc_va_list. */ + +#ifndef __GNUC_VA_LIST +#define __GNUC_VA_LIST +typedef __builtin_va_list __gnuc_va_list; +#endif + +/* Define the standard macros for the user, + if this invocation was from the user program. 
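   For example, a conventional variadic function (shown only as an
   illustration):

     int sum (int count, ...)
     {
       va_list ap;
       int i, total = 0;
       va_start (ap, count);              // count is the last named parameter
       for (i = 0; i < count; i++)
         total += va_arg (ap, int);       // each variadic argument is read as an int
       va_end (ap);
       return total;
     }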
*/ +#ifdef _STDARG_H + +#define va_start(v,l) __builtin_va_start(v,l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v,l) __builtin_va_arg(v,l) +#if !defined(__STRICT_ANSI__) || __STDC_VERSION__ + 0 >= 199900L +#define va_copy(d,s) __builtin_va_copy(d,s) +#endif +#define __va_copy(d,s) __builtin_va_copy(d,s) + +/* Define va_list, if desired, from __gnuc_va_list. */ +/* We deliberately do not define va_list when called from + stdio.h, because ANSI C says that stdio.h is not supposed to define + va_list. stdio.h needs to have access to that data type, + but must not use that name. It should use the name __gnuc_va_list, + which is safe because it is reserved for the implementation. */ + +#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". */ +#undef _VA_LIST +#endif + +#ifdef _BSD_VA_LIST +#undef _BSD_VA_LIST +#endif + +#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST)) +/* SVR4.2 uses _VA_LIST for an internal alias for va_list, + so we must avoid testing it and setting it here. + SVR4 uses _VA_LIST as a flag in stdarg.h, but we should + have no conflict with that. */ +#ifndef _VA_LIST_ +#define _VA_LIST_ +#ifdef __i860__ +#ifndef _VA_LIST +#define _VA_LIST va_list +#endif +#endif /* __i860__ */ +typedef __gnuc_va_list va_list; +#ifdef _SCO_DS +#define __VA_LIST +#endif +#endif /* _VA_LIST_ */ +#else /* not __svr4__ || _SCO_DS */ + +/* The macro _VA_LIST_ is the same thing used by this file in Ultrix. + But on BSD NET2 we must not test or define or undef it. + (Note that the comments in NET 2's ansi.h + are incorrect for _VA_LIST_--see stdio.h!) */ +#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT) +/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */ +#ifndef _VA_LIST_DEFINED +/* The macro _VA_LIST is used in SCO Unix 3.2. */ +#ifndef _VA_LIST +/* The macro _VA_LIST_T_H is used in the Bull dpx2 */ +#ifndef _VA_LIST_T_H +/* The macro __va_list__ is used by BeOS. */ +#ifndef __va_list__ +typedef __gnuc_va_list va_list; +#endif /* not __va_list__ */ +#endif /* not _VA_LIST_T_H */ +#endif /* not _VA_LIST */ +#endif /* not _VA_LIST_DEFINED */ +#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__)) +#define _VA_LIST_ +#endif +#ifndef _VA_LIST +#define _VA_LIST +#endif +#ifndef _VA_LIST_DEFINED +#define _VA_LIST_DEFINED +#endif +#ifndef _VA_LIST_T_H +#define _VA_LIST_T_H +#endif +#ifndef __va_list__ +#define __va_list__ +#endif + +#endif /* not _VA_LIST_, except on certain systems */ + +#endif /* not __svr4__ */ + +#endif /* _STDARG_H */ + +#endif /* not _ANSI_STDARG_H_ */ +#endif /* not _STDARG_H */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/stdbool.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/stdbool.h new file mode 100644 index 0000000..b36e650 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/stdbool.h @@ -0,0 +1,53 @@ +/* Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 7.16 Boolean type and values + */ + +#ifndef _STDBOOL_H +#define _STDBOOL_H + +#ifndef __cplusplus + +#define bool _Bool +#define true 1 +#define false 0 + +#else /* __cplusplus */ + +/* Supporting in C++ is a GCC extension. */ +#define _Bool bool +#define bool bool +#define false false +#define true true + +#endif /* __cplusplus */ + +/* Signal that all the definitions are present. */ +#define __bool_true_false_are_defined 1 + +#endif /* stdbool.h */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/stddef.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/stddef.h new file mode 100644 index 0000000..7e61795 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/stddef.h @@ -0,0 +1,419 @@ +/* Copyright (C) 1989, 1997, 1998, 1999, 2000, 2002, 2004 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 7.17 Common definitions + */ +#if (!defined(_STDDEF_H) && !defined(_STDDEF_H_) && !defined(_ANSI_STDDEF_H) \ + && !defined(__STDDEF_H__)) \ + || defined(__need_wchar_t) || defined(__need_size_t) \ + || defined(__need_ptrdiff_t) || defined(__need_NULL) \ + || defined(__need_wint_t) + +/* Any one of these symbols __need_* means that GNU libc + wants us just to define one data type. So don't define + the symbols that indicate this file's entire job has been done. */ +#if (!defined(__need_wchar_t) && !defined(__need_size_t) \ + && !defined(__need_ptrdiff_t) && !defined(__need_NULL) \ + && !defined(__need_wint_t)) +#define _STDDEF_H +#define _STDDEF_H_ +/* snaroff@next.com says the NeXT needs this. */ +#define _ANSI_STDDEF_H +/* Irix 5.1 needs this. */ +#define __STDDEF_H__ +#endif + +#ifndef __sys_stdtypes_h +/* This avoids lossage on SunOS but only if stdtypes.h comes first. + There's no way to win with the other order! Sun lossage. 
*/ + +/* On 4.3bsd-net2, make sure ansi.h is included, so we have + one less case to deal with in the following. */ +#if defined (__BSD_NET2__) || defined (____386BSD____) || (defined (__FreeBSD__) && (__FreeBSD__ < 5)) || defined(__NetBSD__) +#include +#endif +/* On FreeBSD 5, machine/ansi.h does not exist anymore... */ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +#include +#endif + +/* In 4.3bsd-net2, machine/ansi.h defines these symbols, which are + defined if the corresponding type is *not* defined. + FreeBSD-2.1 defines _MACHINE_ANSI_H_ instead of _ANSI_H_ */ +#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) +#if !defined(_SIZE_T_) && !defined(_BSD_SIZE_T_) +#define _SIZE_T +#endif +#if !defined(_PTRDIFF_T_) && !defined(_BSD_PTRDIFF_T_) +#define _PTRDIFF_T +#endif +/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_. */ +#if !defined(_WCHAR_T_) && !defined(_BSD_WCHAR_T_) +#ifndef _BSD_WCHAR_T_ +#define _WCHAR_T +#endif +#endif +/* Undef _FOO_T_ if we are supposed to define foo_t. */ +#if defined (__need_ptrdiff_t) || defined (_STDDEF_H_) +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#if defined (__need_size_t) || defined (_STDDEF_H_) +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#if defined (__need_wchar_t) || defined (_STDDEF_H_) +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +#endif /* defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) */ + +/* Sequent's header files use _PTRDIFF_T_ in some conflicting way. + Just ignore it. */ +#if defined (__sequent__) && defined (_PTRDIFF_T_) +#undef _PTRDIFF_T_ +#endif + +/* On VxWorks, may have defined macros like + _TYPE_size_t which will typedef size_t. fixincludes patched the + vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is + not defined, and so that defining this macro defines _GCC_SIZE_T. + If we find that the macros are still defined at this point, we must + invoke them so that the type is defined as expected. */ +#if defined (_TYPE_ptrdiff_t) && (defined (__need_ptrdiff_t) || defined (_STDDEF_H_)) +_TYPE_ptrdiff_t; +#undef _TYPE_ptrdiff_t +#endif +#if defined (_TYPE_size_t) && (defined (__need_size_t) || defined (_STDDEF_H_)) +_TYPE_size_t; +#undef _TYPE_size_t +#endif +#if defined (_TYPE_wchar_t) && (defined (__need_wchar_t) || defined (_STDDEF_H_)) +_TYPE_wchar_t; +#undef _TYPE_wchar_t +#endif + +/* In case nobody has defined these types, but we aren't running under + GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and + __WCHAR_TYPE__ have reasonable values. This can happen if the + parts of GCC is compiled by an older compiler, that actually + include gstddef.h, such as collect2. */ + +/* Signed type of difference of two pointers. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_ptrdiff_t) +#ifndef _PTRDIFF_T /* in case has defined it. 
*/ +#ifndef _T_PTRDIFF_ +#ifndef _T_PTRDIFF +#ifndef __PTRDIFF_T +#ifndef _PTRDIFF_T_ +#ifndef _BSD_PTRDIFF_T_ +#ifndef ___int_ptrdiff_t_h +#ifndef _GCC_PTRDIFF_T +#define _PTRDIFF_T +#define _T_PTRDIFF_ +#define _T_PTRDIFF +#define __PTRDIFF_T +#define _PTRDIFF_T_ +#define _BSD_PTRDIFF_T_ +#define ___int_ptrdiff_t_h +#define _GCC_PTRDIFF_T +#ifndef __PTRDIFF_TYPE__ +#define __PTRDIFF_TYPE__ long int +#endif +typedef __PTRDIFF_TYPE__ ptrdiff_t; +#endif /* _GCC_PTRDIFF_T */ +#endif /* ___int_ptrdiff_t_h */ +#endif /* _BSD_PTRDIFF_T_ */ +#endif /* _PTRDIFF_T_ */ +#endif /* __PTRDIFF_T */ +#endif /* _T_PTRDIFF */ +#endif /* _T_PTRDIFF_ */ +#endif /* _PTRDIFF_T */ + +/* If this symbol has done its job, get rid of it. */ +#undef __need_ptrdiff_t + +#endif /* _STDDEF_H or __need_ptrdiff_t. */ + +/* Unsigned type of `sizeof' something. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_size_t) +#ifndef __size_t__ /* BeOS */ +#ifndef __SIZE_T__ /* Cray Unicos/Mk */ +#ifndef _SIZE_T /* in case has defined it. */ +#ifndef _SYS_SIZE_T_H +#ifndef _T_SIZE_ +#ifndef _T_SIZE +#ifndef __SIZE_T +#ifndef _SIZE_T_ +#ifndef _BSD_SIZE_T_ +#ifndef _SIZE_T_DEFINED_ +#ifndef _SIZE_T_DEFINED +#ifndef _BSD_SIZE_T_DEFINED_ /* Darwin */ +#ifndef _SIZE_T_DECLARED /* FreeBSD 5 */ +#ifndef ___int_size_t_h +#ifndef _GCC_SIZE_T +#ifndef _SIZET_ +#ifndef __size_t +#define __size_t__ /* BeOS */ +#define __SIZE_T__ /* Cray Unicos/Mk */ +#define _SIZE_T +#define _SYS_SIZE_T_H +#define _T_SIZE_ +#define _T_SIZE +#define __SIZE_T +#define _SIZE_T_ +#define _BSD_SIZE_T_ +#define _SIZE_T_DEFINED_ +#define _SIZE_T_DEFINED +#define _BSD_SIZE_T_DEFINED_ /* Darwin */ +#define _SIZE_T_DECLARED /* FreeBSD 5 */ +#define ___int_size_t_h +#define _GCC_SIZE_T +#define _SIZET_ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +/* __size_t is a typedef on FreeBSD 5!, must not trash it. */ +#else +#define __size_t +#endif +#ifndef __SIZE_TYPE__ +#define __SIZE_TYPE__ long unsigned int +#endif +#if !(defined (__GNUG__) && defined (size_t)) +typedef __SIZE_TYPE__ size_t; +#ifdef __BEOS__ +typedef long ssize_t; +#endif /* __BEOS__ */ +#endif /* !(defined (__GNUG__) && defined (size_t)) */ +#endif /* __size_t */ +#endif /* _SIZET_ */ +#endif /* _GCC_SIZE_T */ +#endif /* ___int_size_t_h */ +#endif /* _SIZE_T_DECLARED */ +#endif /* _BSD_SIZE_T_DEFINED_ */ +#endif /* _SIZE_T_DEFINED */ +#endif /* _SIZE_T_DEFINED_ */ +#endif /* _BSD_SIZE_T_ */ +#endif /* _SIZE_T_ */ +#endif /* __SIZE_T */ +#endif /* _T_SIZE */ +#endif /* _T_SIZE_ */ +#endif /* _SYS_SIZE_T_H */ +#endif /* _SIZE_T */ +#endif /* __SIZE_T__ */ +#endif /* __size_t__ */ +#undef __need_size_t +#endif /* _STDDEF_H or __need_size_t. */ + + +/* Wide character type. + Locale-writers should change this as necessary to + be big enough to hold unique values not between 0 and 127, + and not (wchar_t) -1, for each defined multibyte character. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. 
*/ +#if defined (_STDDEF_H) || defined (__need_wchar_t) +#ifndef __wchar_t__ /* BeOS */ +#ifndef __WCHAR_T__ /* Cray Unicos/Mk */ +#ifndef _WCHAR_T +#ifndef _T_WCHAR_ +#ifndef _T_WCHAR +#ifndef __WCHAR_T +#ifndef _WCHAR_T_ +#ifndef _BSD_WCHAR_T_ +#ifndef _BSD_WCHAR_T_DEFINED_ /* Darwin */ +#ifndef _BSD_RUNE_T_DEFINED_ /* Darwin */ +#ifndef _WCHAR_T_DECLARED /* FreeBSD 5 */ +#ifndef _WCHAR_T_DEFINED_ +#ifndef _WCHAR_T_DEFINED +#ifndef _WCHAR_T_H +#ifndef ___int_wchar_t_h +#ifndef __INT_WCHAR_T_H +#ifndef _GCC_WCHAR_T +#define __wchar_t__ /* BeOS */ +#define __WCHAR_T__ /* Cray Unicos/Mk */ +#define _WCHAR_T +#define _T_WCHAR_ +#define _T_WCHAR +#define __WCHAR_T +#define _WCHAR_T_ +#define _BSD_WCHAR_T_ +#define _WCHAR_T_DEFINED_ +#define _WCHAR_T_DEFINED +#define _WCHAR_T_H +#define ___int_wchar_t_h +#define __INT_WCHAR_T_H +#define _GCC_WCHAR_T +#define _WCHAR_T_DECLARED + +/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other + symbols in the _FOO_T_ family, stays defined even after its + corresponding type is defined). If we define wchar_t, then we + must undef _WCHAR_T_; for BSD/386 1.1 (and perhaps others), if + we undef _WCHAR_T_, then we must also define rune_t, since + headers like runetype.h assume that if machine/ansi.h is included, + and _BSD_WCHAR_T_ is not defined, then rune_t is available. + machine/ansi.h says, "Note that _WCHAR_T_ and _RUNE_T_ must be of + the same type." */ +#ifdef _BSD_WCHAR_T_ +#undef _BSD_WCHAR_T_ +#ifdef _BSD_RUNE_T_ +#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE) +typedef _BSD_RUNE_T_ rune_t; +#define _BSD_WCHAR_T_DEFINED_ +#define _BSD_RUNE_T_DEFINED_ /* Darwin */ +#if defined (__FreeBSD__) && (__FreeBSD__ < 5) +/* Why is this file so hard to maintain properly? In contrast to + the comment above regarding BSD/386 1.1, on FreeBSD for as long + as the symbol has existed, _BSD_RUNE_T_ must not stay defined or + redundant typedefs will occur when stdlib.h is included after this file. */ +#undef _BSD_RUNE_T_ +#endif +#endif +#endif +#endif +/* FreeBSD 5 can't be handled well using "traditional" logic above + since it no longer defines _BSD_RUNE_T_ yet still desires to export + rune_t in some cases... */ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE) +#if __BSD_VISIBLE +#ifndef _RUNE_T_DECLARED +typedef __rune_t rune_t; +#define _RUNE_T_DECLARED +#endif +#endif +#endif +#endif + +#ifndef __WCHAR_TYPE__ +#define __WCHAR_TYPE__ int +#endif +#ifndef __cplusplus +typedef __WCHAR_TYPE__ wchar_t; +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif /* _WCHAR_T_DECLARED */ +#endif /* _BSD_RUNE_T_DEFINED_ */ +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif /* __WCHAR_T__ */ +#endif /* __wchar_t__ */ +#undef __need_wchar_t +#endif /* _STDDEF_H or __need_wchar_t. */ + +#if defined (__need_wint_t) +#ifndef _WINT_T +#define _WINT_T + +#ifndef __WINT_TYPE__ +#define __WINT_TYPE__ unsigned int +#endif +typedef __WINT_TYPE__ wint_t; +#endif +#undef __need_wint_t +#endif + +/* In 4.3bsd-net2, leave these undefined to indicate that size_t, etc. + are already defined. */ +/* BSD/OS 3.1 and FreeBSD [23].x require the MACHINE_ANSI_H check here. */ +#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) +/* The references to _GCC_PTRDIFF_T_, _GCC_SIZE_T_, and _GCC_WCHAR_T_ + are probably typos and should be removed before 2.8 is released. 
*/ +#ifdef _GCC_PTRDIFF_T_ +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#ifdef _GCC_SIZE_T_ +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#ifdef _GCC_WCHAR_T_ +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +/* The following ones are the real ones. */ +#ifdef _GCC_PTRDIFF_T +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#ifdef _GCC_SIZE_T +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#ifdef _GCC_WCHAR_T +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +#endif /* _ANSI_H_ || _MACHINE_ANSI_H_ */ + +#endif /* __sys_stdtypes_h */ + +/* A null pointer constant. */ + +#if defined (_STDDEF_H) || defined (__need_NULL) +#undef NULL /* in case has defined it. */ +#ifdef __GNUG__ +#define NULL __null +#else /* G++ */ +#ifndef __cplusplus +#define NULL ((void *)0) +#else /* C++ */ +#define NULL 0 +#endif /* C++ */ +#endif /* G++ */ +#endif /* NULL not defined and or need NULL. */ +#undef __need_NULL + +#ifdef _STDDEF_H + +/* Offset of member MEMBER in a struct of type TYPE. */ +#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER) + +#endif /* _STDDEF_H was defined this time */ + +#endif /* !_STDDEF_H && !_STDDEF_H_ && !_ANSI_STDDEF_H && !__STDDEF_H__ + || __need_XXX was not defined before */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/stdint.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/stdint.h new file mode 120000 index 0000000..cff0fff --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/stdint.h @@ -0,0 +1 @@ +../../../../../include/gcc/darwin/4.2/stdint.h \ No newline at end of file diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/syslimits.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/syslimits.h new file mode 100644 index 0000000..a449979 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/syslimits.h @@ -0,0 +1,8 @@ +/* syslimits.h stands for the system's own limits.h file. + If we can use it ok unmodified, then we install this text. + If fixincludes fixes it, then the fixed version is installed + instead of this text. */ + +#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */ +/* APPLE LOCAL begin 4401222 */ +/* APPLE LOCAL end 4401222 */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/tgmath.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/tgmath.h new file mode 100644 index 0000000..0874196 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/tgmath.h @@ -0,0 +1,182 @@ +/* APPLE LOCAL file mainline 2007-06-12 2872232 */ +/* Copyright (C) 2004 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. 
This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 7.22 Type-generic math + */ + +#ifndef _TGMATH_H +#define _TGMATH_H + +#include <math.h> + +#ifndef __cplusplus +#include <complex.h> + +/* Naming convention: generic macros are defining using + __TGMATH_CPLX*, __TGMATH_REAL*, and __TGMATH_CPLX_ONLY. _CPLX + means the generic argument(s) may be real or complex, _REAL means + real only, _CPLX means complex only. If there is no suffix, we are + defining a function of one generic argument. If the suffix is _n + it is a function of n generic arguments. If the suffix is _m_n it + is a function of n arguments, the first m of which are generic. We + only define these macros for values of n and/or m that are needed. */ + +/* The general rules for generic macros are given in 7.22 paragraphs 1 and 2. + If any generic parameter is complex, we use a complex version. Otherwise + we use a real version. If the real part of any generic parameter is long + double, we use the long double version. Otherwise if the real part of any + generic paramter is double or of integer type, we use the double version. + Otherwise we use the float version. */ + +#define __tg_cplx(expr) \ + __builtin_classify_type(expr) == 9 + +#define __tg_ldbl(expr) \ + __builtin_types_compatible_p(__typeof__(expr), long double) + +#define __tg_dbl(expr) \ + (__builtin_types_compatible_p(__typeof__(expr), double) \ + || __builtin_classify_type(expr) == 1) + +#define __tg_choose(x,f,d,l) \ + __builtin_choose_expr(__tg_ldbl(x), l, \ + __builtin_choose_expr(__tg_dbl(x), d, \ + f)) + +#define __tg_choose_2(x,y,f,d,l) \ + __builtin_choose_expr(__tg_ldbl(x) || __tg_ldbl(y), l, \ + __builtin_choose_expr(__tg_dbl(x) || __tg_dbl(y), d, \ + f)) + +#define __tg_choose_3(x,y,z,f,d,l) \ + __builtin_choose_expr(__tg_ldbl(x) || __tg_ldbl(y) || __tg_ldbl(z), l, \ + __builtin_choose_expr(__tg_dbl(x) || __tg_dbl(y) \ + || __tg_dbl(z), d, \ + f)) + +#define __TGMATH_CPLX(z,R,C) \ + __builtin_choose_expr (__tg_cplx(z), \ + __tg_choose (__real__(z), C##f(z), (C)(z), C##l(z)), \ + /* APPLE LOCAL shorten-64-to-32 4604239 */ \ + __tg_choose (z, R##f((float)(z)), (R)(z), R##l(z))) + +#define __TGMATH_CPLX_2(z1,z2,R,C) \ + __builtin_choose_expr (__tg_cplx(z1) || __tg_cplx(z2), \ + __tg_choose_2 (__real__(z1), __real__(z2), \ + C##f(z1,z2), (C)(z1,z2), C##l(z1,z2)), \ + __tg_choose_2 (z1, z2, \ + /* APPLE LOCAL shorten-64-to-32 5909621 */ \ + R##f((float)(z1),(float)(z2)), (R)(z1,z2), R##l(z1,z2))) + +#define __TGMATH_REAL(x,R) \ + /* APPLE LOCAL shorten-64-to-32 5909621 */ \ + __tg_choose (x, R##f((float)(x)), (R)(x), R##l(x)) +#define __TGMATH_REAL_2(x,y,R) \ + /* APPLE LOCAL shorten-64-to-32 4604239 */ \ + __tg_choose_2 (x, y, R##f((float)(x),(float)(y)), (R)(x,y), R##l(x,y)) +#define __TGMATH_REAL_3(x,y,z,R) \ + /* APPLE LOCAL shorten-64-to-32 5909621 */ \ + __tg_choose_3 (x, y, z, R##f((float)(x),(float)(y),(float)(z)), (R)(x,y,z), R##l(x,y,z)) +#define __TGMATH_REAL_1_2(x,y,R) \ + /* APPLE LOCAL shorten-64-to-32 5909621 */ \ + __tg_choose (x, R##f((float)(x),y), (R)(x,y), R##l(x,y)) +#define __TGMATH_REAL_2_3(x,y,z,R) \ + /* APPLE LOCAL shorten-64-to-32 5909621 */ \ + __tg_choose_2 (x, y, R##f((float)(x),(float)(y),z), (R)(x,y,z), R##l(x,y,z)) +#define __TGMATH_CPLX_ONLY(z,C) \ + __tg_choose (__real__(z), C##f(z), (C)(z), C##l(z)) + +/* Functions defined in both <math.h> and <complex.h> (7.22p4) */ +#define acos(z) __TGMATH_CPLX(z, acos, cacos) +#define asin(z) __TGMATH_CPLX(z,
asin, casin) +#define atan(z) __TGMATH_CPLX(z, atan, catan) +#define acosh(z) __TGMATH_CPLX(z, acosh, cacosh) +#define asinh(z) __TGMATH_CPLX(z, asinh, casinh) +#define atanh(z) __TGMATH_CPLX(z, atanh, catanh) +#define cos(z) __TGMATH_CPLX(z, cos, ccos) +#define sin(z) __TGMATH_CPLX(z, sin, csin) +#define tan(z) __TGMATH_CPLX(z, tan, ctan) +#define cosh(z) __TGMATH_CPLX(z, cosh, ccosh) +#define sinh(z) __TGMATH_CPLX(z, sinh, csinh) +#define tanh(z) __TGMATH_CPLX(z, tanh, ctanh) +#define exp(z) __TGMATH_CPLX(z, exp, cexp) +#define log(z) __TGMATH_CPLX(z, log, clog) +#define pow(z1,z2) __TGMATH_CPLX_2(z1, z2, pow, cpow) +#define sqrt(z) __TGMATH_CPLX(z, sqrt, csqrt) +#define fabs(z) __TGMATH_CPLX(z, fabs, cabs) + +/* Functions defined in <math.h> only (7.22p5) */ +#define atan2(x,y) __TGMATH_REAL_2(x, y, atan2) +#define cbrt(x) __TGMATH_REAL(x, cbrt) +#define ceil(x) __TGMATH_REAL(x, ceil) +#define copysign(x,y) __TGMATH_REAL_2(x, y, copysign) +#define erf(x) __TGMATH_REAL(x, erf) +#define erfc(x) __TGMATH_REAL(x, erfc) +#define exp2(x) __TGMATH_REAL(x, exp2) +#define expm1(x) __TGMATH_REAL(x, expm1) +#define fdim(x,y) __TGMATH_REAL_2(x, y, fdim) +#define floor(x) __TGMATH_REAL(x, floor) +#define fma(x,y,z) __TGMATH_REAL_3(x, y, z, fma) +#define fmax(x,y) __TGMATH_REAL_2(x, y, fmax) +#define fmin(x,y) __TGMATH_REAL_2(x, y, fmin) +#define fmod(x,y) __TGMATH_REAL_2(x, y, fmod) +#define frexp(x,y) __TGMATH_REAL_1_2(x, y, frexp) +#define hypot(x,y) __TGMATH_REAL_2(x, y, hypot) +#define ilogb(x) __TGMATH_REAL(x, ilogb) +#define ldexp(x,y) __TGMATH_REAL_1_2(x, y, ldexp) +#define lgamma(x) __TGMATH_REAL(x, lgamma) +#define llrint(x) __TGMATH_REAL(x, llrint) +#define llround(x) __TGMATH_REAL(x, llround) +#define log10(x) __TGMATH_REAL(x, log10) +#define log1p(x) __TGMATH_REAL(x, log1p) +#define log2(x) __TGMATH_REAL(x, log2) +#define logb(x) __TGMATH_REAL(x, logb) +#define lrint(x) __TGMATH_REAL(x, lrint) +#define lround(x) __TGMATH_REAL(x, lround) +#define nearbyint(x) __TGMATH_REAL(x, nearbyint) +#define nextafter(x,y) __TGMATH_REAL_2(x, y, nextafter) +#define nexttoward(x,y) __TGMATH_REAL_1_2(x, y, nexttoward) +#define remainder(x,y) __TGMATH_REAL_2(x, y, remainder) +#define remquo(x,y,z) __TGMATH_REAL_2_3(x, y, z, remquo) +#define rint(x) __TGMATH_REAL(x, rint) +#define round(x) __TGMATH_REAL(x, round) +#define scalbn(x,y) __TGMATH_REAL_1_2(x, y, scalbn) +#define scalbln(x,y) __TGMATH_REAL_1_2(x, y, scalbln) +#define tgamma(x) __TGMATH_REAL(x, tgamma) +#define trunc(x) __TGMATH_REAL(x, trunc) + +/* Functions defined in <complex.h> only (7.22p6) */ +#define carg(z) __TGMATH_CPLX_ONLY(z, carg) +#define cimag(z) __TGMATH_CPLX_ONLY(z, cimag) +#define conj(z) __TGMATH_CPLX_ONLY(z, conj) +#define cproj(z) __TGMATH_CPLX_ONLY(z, cproj) +#define creal(z) __TGMATH_CPLX_ONLY(z, creal) + +#endif /* __cplusplus */ +#endif /* _TGMATH_H */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/tmmintrin.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/tmmintrin.h new file mode 100644 index 0000000..1bb254b --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/tmmintrin.h @@ -0,0 +1,304 @@ +/* APPLE LOCAL file ssse3 4424835 */ +/* Copyright (C) 2006 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version.
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.1. */ + +#ifndef _TMMINTRIN_H_INCLUDED +#define _TMMINTRIN_H_INCLUDED + +#ifdef __SSSE3__ +#include <pmmintrin.h> + +/* APPLE LOCAL begin nodebug inline */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddd128 ((__v4si)__X, (__v4si)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadds_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddd ((__v2si)__X, (__v2si)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadds_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddsw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubd128 ((__v4si)__X, (__v4si)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end
radar 5618945 */ +_mm_hsubs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubd ((__v2si)__X, (__v2si)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsubs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubsw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_maddubs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaddubsw128 ((__v16qi)__X, (__v16qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_maddubs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pmaddubsw ((__v8qi)__X, (__v8qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhrs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmulhrsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhrs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pmulhrsw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_shuffle_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pshufb128 ((__v16qi)__X, (__v16qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_shuffle_pi8 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pshufb ((__v8qi)__X, (__v8qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignb128 ((__v16qi)__X, (__v16qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignd128 ((__v4si)__X, (__v4si)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_pi8 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignb ((__v8qi)__X, (__v8qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) 
+/* APPLE LOCAL end radar 5618945 */ +_mm_sign_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignd ((__v2si)__X, (__v2si)__Y); +} + +/* APPLE LOCAL begin 5814283 */ +#define _mm_alignr_epi8(__X, __Y, __N) \ + ((__m128i)__builtin_ia32_palignr128 ((__v2di)(__X), (__v2di)(__Y), (__N) * 8)) +/* APPLE LOCAL end 5814283 */ + +#define _mm_alignr_pi8(__X, __Y, __N) \ + ((__m64)__builtin_ia32_palignr ((long long) (__X), (long long) (__Y), (__N) * 8)) + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_epi8 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsb128 ((__v16qi)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsw128 ((__v8hi)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsd128 ((__v4si)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_pi8 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsb ((__v8qi)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_pi16 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsw ((__v4hi)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_pi32 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsd ((__v2si)__X); +} + +/* APPLE LOCAL begin nodebug inline */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline */ + +#endif /* __SSSE3__ */ + +#endif /* _TMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/unwind.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/unwind.h new file mode 100644 index 0000000..3f4c065 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/unwind.h @@ -0,0 +1,241 @@ +/* Exception handling and frame unwind runtime interface routines. + Copyright (C) 2001, 2003, 2004, 2006 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. 
*/ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* This is derived from the C++ ABI for IA-64. Where we diverge + for cross-architecture compatibility are noted with "@@@". */ + +#ifndef _UNWIND_H +#define _UNWIND_H + +#ifndef HIDE_EXPORTS +#pragma GCC visibility push(default) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Level 1: Base ABI */ + +/* @@@ The IA-64 ABI uses uint64 throughout. Most places this is + inefficient for 32-bit and smaller machines. */ +typedef unsigned _Unwind_Word __attribute__((__mode__(__word__))); +typedef signed _Unwind_Sword __attribute__((__mode__(__word__))); +#if defined(__ia64__) && defined(__hpux__) +typedef unsigned _Unwind_Ptr __attribute__((__mode__(__word__))); +#else +typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__))); +#endif +typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__))); + +/* @@@ The IA-64 ABI uses a 64-bit word to identify the producer and + consumer of an exception. We'll go along with this for now even on + 32-bit machines. We'll need to provide some other option for + 16-bit machines and for machines with > 8 bits per byte. */ +typedef unsigned _Unwind_Exception_Class __attribute__((__mode__(__DI__))); + +/* The unwind interface uses reason codes in several contexts to + identify the reasons for failures or other actions. */ +typedef enum +{ + _URC_NO_REASON = 0, + _URC_FOREIGN_EXCEPTION_CAUGHT = 1, + _URC_FATAL_PHASE2_ERROR = 2, + _URC_FATAL_PHASE1_ERROR = 3, + _URC_NORMAL_STOP = 4, + _URC_END_OF_STACK = 5, + _URC_HANDLER_FOUND = 6, + _URC_INSTALL_CONTEXT = 7, + _URC_CONTINUE_UNWIND = 8 +} _Unwind_Reason_Code; + + +/* The unwind interface uses a pointer to an exception header object + as its representation of an exception being thrown. In general, the + full representation of an exception object is language- and + implementation-specific, but it will be prefixed by a header + understood by the unwind interface. */ + +struct _Unwind_Exception; + +typedef void (*_Unwind_Exception_Cleanup_Fn) (_Unwind_Reason_Code, + struct _Unwind_Exception *); + +struct _Unwind_Exception +{ + _Unwind_Exception_Class exception_class; + _Unwind_Exception_Cleanup_Fn exception_cleanup; + _Unwind_Word private_1; + _Unwind_Word private_2; + + /* @@@ The IA-64 ABI says that this structure must be double-word aligned. + Taking that literally does not make much sense generically. Instead we + provide the maximum alignment required by any type for the machine. */ +} __attribute__((__aligned__)); + + +/* The ACTIONS argument to the personality routine is a bitwise OR of one + or more of the following constants. */ +typedef int _Unwind_Action; + +#define _UA_SEARCH_PHASE 1 +#define _UA_CLEANUP_PHASE 2 +#define _UA_HANDLER_FRAME 4 +#define _UA_FORCE_UNWIND 8 +#define _UA_END_OF_STACK 16 + +/* This is an opaque type used to refer to a system-specific data + structure used by the system unwinder. This context is created and + destroyed by the system, and passed to the personality routine + during unwinding. */ +struct _Unwind_Context; + +/* Raise an exception, passing along the given exception object. 
*/ +extern _Unwind_Reason_Code _Unwind_RaiseException (struct _Unwind_Exception *); + +/* Raise an exception for forced unwinding. */ + +typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn) + (int, _Unwind_Action, _Unwind_Exception_Class, + struct _Unwind_Exception *, struct _Unwind_Context *, void *); + +extern _Unwind_Reason_Code _Unwind_ForcedUnwind (struct _Unwind_Exception *, + _Unwind_Stop_Fn, + void *); + +/* Helper to invoke the exception_cleanup routine. */ +extern void _Unwind_DeleteException (struct _Unwind_Exception *); + +/* Resume propagation of an existing exception. This is used after + e.g. executing cleanup code, and not to implement rethrowing. */ +extern void _Unwind_Resume (struct _Unwind_Exception *); + +/* @@@ Resume propagation of an FORCE_UNWIND exception, or to rethrow + a normal exception that was handled. */ +extern _Unwind_Reason_Code _Unwind_Resume_or_Rethrow (struct _Unwind_Exception *); + +/* @@@ Use unwind data to perform a stack backtrace. The trace callback + is called for every stack frame in the call chain, but no cleanup + actions are performed. */ +typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) + (struct _Unwind_Context *, void *); + +extern _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn, void *); + +/* These functions are used for communicating information about the unwind + context (i.e. the unwind descriptors and the user register state) between + the unwind library and the personality routine and landing pad. Only + selected registers maybe manipulated. */ + +extern _Unwind_Word _Unwind_GetGR (struct _Unwind_Context *, int); +extern void _Unwind_SetGR (struct _Unwind_Context *, int, _Unwind_Word); + +extern _Unwind_Ptr _Unwind_GetIP (struct _Unwind_Context *); +extern _Unwind_Ptr _Unwind_GetIPInfo (struct _Unwind_Context *, int *); +extern void _Unwind_SetIP (struct _Unwind_Context *, _Unwind_Ptr); + +/* @@@ Retrieve the CFA of the given context. */ +extern _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *); + +extern void *_Unwind_GetLanguageSpecificData (struct _Unwind_Context *); + +extern _Unwind_Ptr _Unwind_GetRegionStart (struct _Unwind_Context *); + + +/* The personality routine is the function in the C++ (or other language) + runtime library which serves as an interface between the system unwind + library and language-specific exception handling semantics. It is + specific to the code fragment described by an unwind info block, and + it is always referenced via the pointer in the unwind info block, and + hence it has no ABI-specified name. + + Note that this implies that two different C++ implementations can + use different names, and have different contents in the language + specific data area. Moreover, that the language specific data + area contains no version info because name of the function invoked + provides more effective versioning by detecting at link time the + lack of code to handle the different data format. */ + +typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn) + (int, _Unwind_Action, _Unwind_Exception_Class, + struct _Unwind_Exception *, struct _Unwind_Context *); + +/* @@@ The following alternate entry points are for setjmp/longjmp + based unwinding. 
*/ + +struct SjLj_Function_Context; +extern void _Unwind_SjLj_Register (struct SjLj_Function_Context *); +extern void _Unwind_SjLj_Unregister (struct SjLj_Function_Context *); + +extern _Unwind_Reason_Code _Unwind_SjLj_RaiseException + (struct _Unwind_Exception *); +extern _Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind + (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *); +extern void _Unwind_SjLj_Resume (struct _Unwind_Exception *); +extern _Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow (struct _Unwind_Exception *); + +/* @@@ The following provide access to the base addresses for text + and data-relative addressing in the LDSA. In order to stay link + compatible with the standard ABI for IA-64, we inline these. */ + +#ifdef __ia64__ +#include <stdlib.h> + +static inline _Unwind_Ptr +_Unwind_GetDataRelBase (struct _Unwind_Context *_C) +{ + /* The GP is stored in R1. */ + return _Unwind_GetGR (_C, 1); +} + +static inline _Unwind_Ptr +_Unwind_GetTextRelBase (struct _Unwind_Context *_C __attribute__ ((__unused__))) +{ + abort (); + return 0; +} + +/* @@@ Retrieve the Backing Store Pointer of the given context. */ +extern _Unwind_Word _Unwind_GetBSP (struct _Unwind_Context *); +#else +extern _Unwind_Ptr _Unwind_GetDataRelBase (struct _Unwind_Context *); +extern _Unwind_Ptr _Unwind_GetTextRelBase (struct _Unwind_Context *); +#endif + +/* @@@ Given an address, return the entry point of the function that + contains it. */ +extern void * _Unwind_FindEnclosingFunction (void *pc); + +#ifdef __cplusplus +} +#endif + +#ifndef HIDE_EXPORTS +#pragma GCC visibility pop +#endif + +#endif /* unwind.h */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/varargs.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/varargs.h new file mode 100644 index 0000000..4b9803e --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/varargs.h @@ -0,0 +1,7 @@ +#ifndef _VARARGS_H +#define _VARARGS_H + +#error "GCC no longer implements <varargs.h>." +#error "Revise your code to use <stdarg.h>." + +#endif diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/include/xmmintrin.h b/lib/gcc/i686-apple-darwin10/4.2.1/include/xmmintrin.h new file mode 100644 index 0000000..ad805b8 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/include/xmmintrin.h @@ -0,0 +1,1582 @@ +/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */ +/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License.
*/ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _XMMINTRIN_H_INCLUDED +#define _XMMINTRIN_H_INCLUDED + +#ifndef __SSE__ +# error "SSE instruction set not enabled" +#else + +/* We need type definitions from the MMX header file. */ +#include <mmintrin.h> + +/* Get _mm_malloc () and _mm_free (). */ +/* APPLE LOCAL begin xmmintrin.h for kernel 4123064 */ +#if __STDC_HOSTED__ +#include <mm_malloc.h> +#endif +/* APPLE LOCAL end xmmintrin.h for kernel 4123064 */ + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); + +/* Internal data types for implementing the intrinsics. */ +typedef float __v4sf __attribute__ ((__vector_size__ (16))); + +/* Create a selector for use with the SHUFPS instruction. */ +#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \ + (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0)) + +/* Constants for use with _mm_prefetch. */ +enum _mm_hint +{ + _MM_HINT_T0 = 3, + _MM_HINT_T1 = 2, + _MM_HINT_T2 = 1, + _MM_HINT_NTA = 0 +}; + +/* Bits in the MXCSR. */ +#define _MM_EXCEPT_MASK 0x003f +#define _MM_EXCEPT_INVALID 0x0001 +#define _MM_EXCEPT_DENORM 0x0002 +#define _MM_EXCEPT_DIV_ZERO 0x0004 +#define _MM_EXCEPT_OVERFLOW 0x0008 +#define _MM_EXCEPT_UNDERFLOW 0x0010 +#define _MM_EXCEPT_INEXACT 0x0020 + +#define _MM_MASK_MASK 0x1f80 +#define _MM_MASK_INVALID 0x0080 +#define _MM_MASK_DENORM 0x0100 +#define _MM_MASK_DIV_ZERO 0x0200 +#define _MM_MASK_OVERFLOW 0x0400 +#define _MM_MASK_UNDERFLOW 0x0800 +#define _MM_MASK_INEXACT 0x1000 + +#define _MM_ROUND_MASK 0x6000 +#define _MM_ROUND_NEAREST 0x0000 +#define _MM_ROUND_DOWN 0x2000 +#define _MM_ROUND_UP 0x4000 +#define _MM_ROUND_TOWARD_ZERO 0x6000 + +#define _MM_FLUSH_ZERO_MASK 0x8000 +#define _MM_FLUSH_ZERO_ON 0x8000 +#define _MM_FLUSH_ZERO_OFF 0x0000 + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* Create a vector of zeros. */ +/* APPLE LOCAL begin radar 4152603 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setzero_ps (void) +{ + return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f }; +} + +/* Perform the respective operation on the lower SPFP (single-precision + floating-point) values of A and B; the upper three SPFP values are + passed through from A.
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_div_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sqrt_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_rcp_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_rcpss ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_rsqrt_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform the respective operation on the four SPFP values in A and B. 
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_div_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sqrt_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_rcp_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rcpps ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_rsqrt_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform logical bit-wise operations on 128-bit values. */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_and_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_andps (__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_andnot_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_andnps (__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_or_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_orps (__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_xor_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_xorps (__A, __B); +} + +/* Perform a comparison on the lower SPFP values of A and B. If the + comparison is true, place a mask of all ones in the result, otherwise a + mask of zeros. The upper three SPFP values are passed through from A. 
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmple_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpltss ((__v4sf) __B, + (__v4sf) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpge_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpless ((__v4sf) __B, + (__v4sf) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpneq_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnlt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnle_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpngt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpnltss ((__v4sf) __B, + (__v4sf) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnge_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpnless ((__v4sf) __B, + (__v4sf) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpord_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpunord_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform a comparison on the four SPFP values of A and B. For each + element, if the comparison is true, place a mask of all ones in the + result, otherwise a mask of zeros. 
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmple_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpge_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpneq_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnlt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnle_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpngt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnge_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpord_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpunord_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B); +} + +/* Compare the lower SPFP values of A and B and return 1 if true + and 0 if false. 
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comieq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comilt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comile_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comigt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comige_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comineq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomieq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomilt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomile_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomigt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomige_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomineq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B); +} + +/* Convert the lower SPFP value to a 32-bit integer according to the current + rounding mode. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtss_si32 (__m128 __A) +{ + return __builtin_ia32_cvtss2si ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvt_ss2si (__m128 __A) +{ + return _mm_cvtss_si32 (__A); +} + +#ifdef __x86_64__ +/* Convert the lower SPFP value to a 32-bit integer according to the + current rounding mode. */ + +/* Intel intrinsic. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtss_si64 (__m128 __A) +{ + return __builtin_ia32_cvtss2si64 ((__v4sf) __A); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtss_si64x (__m128 __A) +{ + return __builtin_ia32_cvtss2si64 ((__v4sf) __A); +} +#endif + +/* Convert the two lower SPFP values to 32-bit integers according to the + current rounding mode. Return the integers in packed form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtps_pi32 (__m128 __A) +{ + return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvt_ps2pi (__m128 __A) +{ + return _mm_cvtps_pi32 (__A); +} + +/* Truncate the lower SPFP value to a 32-bit integer. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttss_si32 (__m128 __A) +{ + return __builtin_ia32_cvttss2si ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtt_ss2si (__m128 __A) +{ + return _mm_cvttss_si32 (__A); +} + +#ifdef __x86_64__ +/* Truncate the lower SPFP value to a 32-bit integer. */ + +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttss_si64 (__m128 __A) +{ + return __builtin_ia32_cvttss2si64 ((__v4sf) __A); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttss_si64x (__m128 __A) +{ + return __builtin_ia32_cvttss2si64 ((__v4sf) __A); +} +#endif + +/* Truncate the two lower SPFP values to 32-bit integers. Return the + integers in packed form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttps_pi32 (__m128 __A) +{ + return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtt_ps2pi (__m128 __A) +{ + return _mm_cvttps_pi32 (__A); +} + +/* Convert B to a SPFP value and insert it as element zero in A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi32_ss (__m128 __A, int __B) +{ + return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvt_si2ss (__m128 __A, int __B) +{ + return _mm_cvtsi32_ss (__A, __B); +} + +#ifdef __x86_64__ +/* Convert B to a SPFP value and insert it as element zero in A. */ + +/* Intel intrinsic. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_ss (__m128 __A, long long __B) +{ + return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64x_ss (__m128 __A, long long __B) +{ + return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B); +} +#endif + +/* Convert the two 32-bit values in B to SPFP form and insert them + as the two lower elements in A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpi32_ps (__m128 __A, __m64 __B) +{ + return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvt_pi2ps (__m128 __A, __m64 __B) +{ + return _mm_cvtpi32_ps (__A, __B); +} + +/* Convert the four signed 16-bit values in A to SPFP form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpi16_ps (__m64 __A) +{ + __v4hi __sign; + __v2si __hisi, __losi; + __v4sf __r; + + /* This comparison against zero gives us a mask that can be used to + fill in the missing sign bits in the unpack operations below, so + that we get signed values after unpacking. */ + __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A); + + /* Convert the four words to doublewords. */ + __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign); + __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign); + + /* Convert the doublewords to floating point two at a time. */ + __r = (__v4sf) _mm_setzero_ps (); + __r = __builtin_ia32_cvtpi2ps (__r, __hisi); + __r = __builtin_ia32_movlhps (__r, __r); + __r = __builtin_ia32_cvtpi2ps (__r, __losi); + + return (__m128) __r; +} + +/* Convert the four unsigned 16-bit values in A to SPFP form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpu16_ps (__m64 __A) +{ + __v2si __hisi, __losi; + __v4sf __r; + + /* Convert the four words to doublewords. */ + __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL); + __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL); + + /* Convert the doublewords to floating point two at a time. */ + __r = (__v4sf) _mm_setzero_ps (); + __r = __builtin_ia32_cvtpi2ps (__r, __hisi); + __r = __builtin_ia32_movlhps (__r, __r); + __r = __builtin_ia32_cvtpi2ps (__r, __losi); + + return (__m128) __r; +} + +/* Convert the low four signed 8-bit values in A to SPFP form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpi8_ps (__m64 __A) +{ + __v8qi __sign; + + /* This comparison against zero gives us a mask that can be used to + fill in the missing sign bits in the unpack operations below, so + that we get signed values after unpacking. */ + __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A); + + /* Convert the four low bytes to words. 
*/ + __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign); + + return _mm_cvtpi16_ps(__A); +} + +/* Convert the low four unsigned 8-bit values in A to SPFP form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpu8_ps(__m64 __A) +{ + __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL); + return _mm_cvtpu16_ps(__A); +} + +/* Convert the four signed 32-bit values in A and B to SPFP form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpi32x2_ps(__m64 __A, __m64 __B) +{ + __v4sf __zero = (__v4sf) _mm_setzero_ps (); + __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A); + __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B); + return (__m128) __builtin_ia32_movlhps (__sfa, __sfb); +} + +/* Convert the four SPFP values in A to four signed 16-bit integers. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtps_pi16(__m128 __A) +{ + __v4sf __hisf = (__v4sf)__A; + __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf); + __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf); + __v2si __losi = __builtin_ia32_cvtps2pi (__losf); + return (__m64) __builtin_ia32_packssdw (__hisi, __losi); +} + +/* Convert the four SPFP values in A to four signed 8-bit integers. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtps_pi8(__m128 __A) +{ + __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A); + return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL); +} + +/* Selects four specific SPFP values from A and B based on MASK. */ +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask) +{ + return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask); +} +#else +#define _mm_shuffle_ps(A, B, MASK) \ + ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK))) +#endif + + +/* Selects and interleaves the upper two SPFP values from A and B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B); +} + +/* Selects and interleaves the lower two SPFP values from A and B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B); +} + +/* Sets the upper two SPFP values with 64-bits of data loaded from P; + the lower two values are passed through from A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadh_pi (__m128 __A, __m64 const *__P) +{ + return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P); +} + +/* Stores the upper two SPFP values of A into P. 
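/* Editor's illustration, not part of the patched header: using the
   _mm_shuffle_ps macro above with a selector built by _MM_SHUFFLE.  Each
   two-bit field of the mask picks one source element; the pattern below
   reverses the four floats of V.  The helper name is hypothetical and an
   SSE-capable target is assumed.  */

#include <xmmintrin.h>

static __m128 reverse_ps (__m128 v)
{
  /* result = { v[3], v[2], v[1], v[0] } */
  return _mm_shuffle_ps (v, v, _MM_SHUFFLE (0, 1, 2, 3));
}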
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storeh_pi (__m64 *__P, __m128 __A) +{ + __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A); +} + +/* Moves the upper two values of B into the lower two values of A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movehl_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B); +} + +/* Moves the lower two values of B into the upper two values of A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movelh_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B); +} + +/* Sets the lower two SPFP values with 64-bits of data loaded from P; + the upper two values are passed through from A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadl_pi (__m128 __A, __m64 const *__P) +{ + return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P); +} + +/* Stores the lower two SPFP values of A into P. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storel_pi (__m64 *__P, __m128 __A) +{ + __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A); +} + +/* Creates a 4-bit mask from the most significant bits of the SPFP values. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movemask_ps (__m128 __A) +{ + return __builtin_ia32_movmskps ((__v4sf)__A); +} + +/* Return the contents of the control register. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_getcsr (void) +{ + return __builtin_ia32_stmxcsr (); +} + +/* Read exception bits from the control register. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_GET_EXCEPTION_STATE (void) +{ + return _mm_getcsr() & _MM_EXCEPT_MASK; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_GET_EXCEPTION_MASK (void) +{ + return _mm_getcsr() & _MM_MASK_MASK; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_GET_ROUNDING_MODE (void) +{ + return _mm_getcsr() & _MM_ROUND_MASK; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_GET_FLUSH_ZERO_MODE (void) +{ + return _mm_getcsr() & _MM_FLUSH_ZERO_MASK; +} + +/* Set the control register to I. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setcsr (unsigned int __I) +{ + __builtin_ia32_ldmxcsr (__I); +} + +/* Set exception bits in the control register. 
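/* Editor's illustration, not part of the patched header: saving and
   restoring the SSE rounding mode with the control-register helpers in
   this header (_MM_GET_ROUNDING_MODE / _MM_SET_ROUNDING_MODE).  The
   helper name is hypothetical; _MM_ROUND_TOWARD_ZERO is the usual
   xmmintrin.h constant and an SSE-capable target is assumed.  */

#include <xmmintrin.h>

static int convert_toward_zero (__m128 x)
{
  unsigned int saved = _MM_GET_ROUNDING_MODE ();
  int r;

  _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
  r = _mm_cvtss_si32 (x);            /* now truncates toward zero  */
  _MM_SET_ROUNDING_MODE (saved);     /* restore the caller's mode  */
  return r;
}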
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_SET_EXCEPTION_STATE(unsigned int __mask) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_SET_EXCEPTION_MASK (unsigned int __mask) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_SET_ROUNDING_MODE (unsigned int __mode) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode); +} + +/* Create a vector with element 0 as F and the rest zero. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_ss (float __F) +{ + return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 }; +} + +/* Create a vector with all four elements equal to F. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_ps (float __F) +{ + return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_ps1 (float __F) +{ + return _mm_set1_ps (__F); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_ss (float const *__P) +{ + return _mm_set_ss (*__P); +} + +/* Create a vector with all four elements equal to *P. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load1_ps (float const *__P) +{ + return _mm_set1_ps (*__P); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_ps1 (float const *__P) +{ + return _mm_load1_ps (__P); +} + +/* Load four SPFP values from P. The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_ps (float const *__P) +{ + return (__m128) *(__v4sf *)__P; +} + +/* Load four SPFP values from P. The address need not be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadu_ps (float const *__P) +{ + return (__m128) __builtin_ia32_loadups (__P); +} + +/* Load four SPFP values in reverse order. The address must be aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadr_ps (float const *__P) +{ + __v4sf __tmp = *(__v4sf *)__P; + return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3)); +} + +/* Create the vector [Z Y X W]. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W) +{ + return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z }; +} + +/* Create the vector [W X Y Z]. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_ps (float __Z, float __Y, float __X, float __W) +{ + return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W }; +} + +/* Stores the lower SPFP value. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_ss (float *__P, __m128 __A) +{ + *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE float __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtss_f32 (__m128 __A) +{ + return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0); +} + +/* Store four SPFP values. The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_ps (float *__P, __m128 __A) +{ + *(__v4sf *)__P = (__v4sf)__A; +} + +/* Store four SPFP values. The address need not be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storeu_ps (float *__P, __m128 __A) +{ + __builtin_ia32_storeups (__P, (__v4sf)__A); +} + +/* Store the lower SPFP value across four words. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store1_ps (float *__P, __m128 __A) +{ + __v4sf __va = (__v4sf)__A; + __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0)); + _mm_storeu_ps (__P, __tmp); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_ps1 (float *__P, __m128 __A) +{ + _mm_store1_ps (__P, __A); +} + +/* Store four SPFP values in reverse order. The address must be aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storer_ps (float *__P, __m128 __A) +{ + __v4sf __va = (__v4sf)__A; + __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3)); + _mm_store_ps (__P, __tmp); +} + +/* Sets the low SPFP value of A from the low value of B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_move_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B); +} + +/* Extracts one of the four words of A. The selector N must be immediate. 
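/* Editor's illustration, not part of the patched header: the argument
   order of _mm_set_ps (defined above) is high-to-low while _mm_setr_ps is
   low-to-high, so both vectors below store to memory as { 1, 2, 3, 4 }.
   The helper name is hypothetical; OUT must be 16-byte aligned for the
   _mm_store_ps call, and an SSE-capable target is assumed.  */

#include <xmmintrin.h>

static void fill_1234 (float *out)
{
  __m128 a = _mm_set_ps  (4.0f, 3.0f, 2.0f, 1.0f);  /* [Z Y X W] -> {W,X,Y,Z} */
  __m128 b = _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f);  /* already low-to-high    */

  _mm_store_ps (out, a);        /* aligned store                 */
  _mm_storeu_ps (out, b);       /* unaligned store, same values  */
}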
*/ +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_extract_pi16 (__m64 const __A, int const __N) +{ + return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pextrw (__m64 const __A, int const __N) +{ + return _mm_extract_pi16 (__A, __N); +} +#else +#define _mm_extract_pi16(A, N) __builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N)) +#define _m_pextrw(A, N) _mm_extract_pi16((A), (N)) +#endif + +/* Inserts word D into one of four words of A. The selector N must be + immediate. */ +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_insert_pi16 (__m64 const __A, int const __D, int const __N) +{ + return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pinsrw (__m64 const __A, int const __D, int const __N) +{ + return _mm_insert_pi16 (__A, __D, __N); +} +#else +#define _mm_insert_pi16(A, D, N) \ + ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N))) +#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N)) +#endif + +/* Compute the element-wise maximum of signed 16-bit values. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_pi16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmaxsw (__m64 __A, __m64 __B) +{ + return _mm_max_pi16 (__A, __B); +} + +/* Compute the element-wise maximum of unsigned 8-bit values. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmaxub (__m64 __A, __m64 __B) +{ + return _mm_max_pu8 (__A, __B); +} + +/* Compute the element-wise minimum of signed 16-bit values. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_pi16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pminsw (__m64 __A, __m64 __B) +{ + return _mm_min_pi16 (__A, __B); +} + +/* Compute the element-wise minimum of unsigned 8-bit values. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pminub (__m64 __A, __m64 __B) +{ + return _mm_min_pu8 (__A, __B); +} + +/* Create an 8-bit mask of the signs of 8-bit values. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movemask_pi8 (__m64 __A) +{ + return __builtin_ia32_pmovmskb ((__v8qi)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmovmskb (__m64 __A) +{ + return _mm_movemask_pi8 (__A); +} + +/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values + in B and produce the high 16 bits of the 32-bit results. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhi_pu16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmulhuw (__m64 __A, __m64 __B) +{ + return _mm_mulhi_pu16 (__A, __B); +} + +/* Return a combination of the four 16-bit values in A. The selector + must be an immediate. */ +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_shuffle_pi16 (__m64 __A, int __N) +{ + return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pshufw (__m64 __A, int __N) +{ + return _mm_shuffle_pi16 (__A, __N); +} +#else +#define _mm_shuffle_pi16(A, N) \ + ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N))) +#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N)) +#endif + +/* Conditionally store byte elements of A into P. The high bit of each + byte in the selector N determines whether the corresponding byte from + A is stored. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P) +{ + __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_maskmovq (__m64 __A, __m64 __N, char *__P) +{ + _mm_maskmove_si64 (__A, __N, __P); +} + +/* Compute the rounded averages of the unsigned 8-bit values in A and B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_avg_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pavgb (__m64 __A, __m64 __B) +{ + return _mm_avg_pu8 (__A, __B); +} + +/* Compute the rounded averages of the unsigned 16-bit values in A and B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_avg_pu16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pavgw (__m64 __A, __m64 __B) +{ + return _mm_avg_pu16 (__A, __B); +} + +/* Compute the sum of the absolute differences of the unsigned 8-bit + values in A and B. 
Return the value in the lower 16-bit word; the + upper words are cleared. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sad_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psadbw (__m64 __A, __m64 __B) +{ + return _mm_sad_pu8 (__A, __B); +} + +/* Loads one cache line from address P to a location "closer" to the + processor. The selector I specifies the type of prefetch operation. */ +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_prefetch (void *__P, enum _mm_hint __I) +{ + __builtin_prefetch (__P, 0, __I); +} +#else +#define _mm_prefetch(P, I) \ + __builtin_prefetch ((P), 0, (I)) +#endif + +/* Stores the data in A to the address P without polluting the caches. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_stream_pi (__m64 *__P, __m64 __A) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + __builtin_ia32_movntq (__P, __A); +} + +/* Likewise. The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_stream_ps (float *__P, __m128 __A) +{ + __builtin_ia32_movntps (__P, (__v4sf)__A); +} + +/* Guarantees that every preceding store is globally visible before + any subsequent store. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sfence (void) +{ + __builtin_ia32_sfence (); +} + +/* The execution of the next instruction is delayed by an implementation + specific amount of time. The instruction does not modify the + architectural state. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_pause (void) +{ + __asm__ __volatile__ ("rep; nop" : : ); +} +/* APPLE LOCAL end radar 4152603 */ + +/* Transpose the 4x4 matrix composed of row[0-3]. */ +#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ +do { \ + __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \ + __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \ + __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \ + __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \ + __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \ + (row0) = __builtin_ia32_movlhps (__t0, __t1); \ + (row1) = __builtin_ia32_movhlps (__t1, __t0); \ + (row2) = __builtin_ia32_movlhps (__t2, __t3); \ + (row3) = __builtin_ia32_movhlps (__t3, __t2); \ +} while (0) + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* For backward source compatibility. 
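/* Editor's illustration, not part of the patched header: transposing a
   4x4 float matrix in place with the _MM_TRANSPOSE4_PS macro defined
   above.  The helper name is hypothetical; the rows need not be aligned
   because the unaligned load/store intrinsics are used, and an
   SSE-capable target is assumed.  */

#include <xmmintrin.h>

static void transpose_4x4 (float m[4][4])
{
  __m128 r0 = _mm_loadu_ps (m[0]);
  __m128 r1 = _mm_loadu_ps (m[1]);
  __m128 r2 = _mm_loadu_ps (m[2]);
  __m128 r3 = _mm_loadu_ps (m[3]);

  _MM_TRANSPOSE4_PS (r0, r1, r2, r3);   /* rows become columns */

  _mm_storeu_ps (m[0], r0);
  _mm_storeu_ps (m[1], r1);
  _mm_storeu_ps (m[2], r2);
  _mm_storeu_ps (m[3], r3);
}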
*/ +#include + +#endif /* __SSE__ */ +#endif /* _XMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/libcc_kext.a b/lib/gcc/i686-apple-darwin10/4.2.1/libcc_kext.a new file mode 100644 index 0000000..ddb30bb Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/libcc_kext.a differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/libgcc.a b/lib/gcc/i686-apple-darwin10/4.2.1/libgcc.a new file mode 100644 index 0000000..9b59e3c Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/libgcc.a differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/libgcc_eh.a b/lib/gcc/i686-apple-darwin10/4.2.1/libgcc_eh.a new file mode 100644 index 0000000..fdf8ba5 Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/libgcc_eh.a differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/libgcc_static.a b/lib/gcc/i686-apple-darwin10/4.2.1/libgcc_static.a new file mode 100644 index 0000000..337f66e Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/libgcc_static.a differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/libgcov.a b/lib/gcc/i686-apple-darwin10/4.2.1/libgcov.a new file mode 100644 index 0000000..506289f Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/libgcov.a differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/libgomp.a b/lib/gcc/i686-apple-darwin10/4.2.1/libgomp.a new file mode 100644 index 0000000..fcc074d Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/libgomp.a differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/libgomp.spec b/lib/gcc/i686-apple-darwin10/4.2.1/libgomp.spec new file mode 100644 index 0000000..7102255 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/libgomp.spec @@ -0,0 +1,3 @@ +# This spec file is read by gcc when linking. It is used to specify the +# standard libraries we need in order to link with -fopenmp. 
+*link_gomp: -lgomp %{static: } diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/libstdc++.dylib b/lib/gcc/i686-apple-darwin10/4.2.1/libstdc++.dylib new file mode 100755 index 0000000..819edae Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/libstdc++.dylib differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/crt3.o b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/crt3.o new file mode 100644 index 0000000..eb28ab0 Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/crt3.o differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcc.a b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcc.a new file mode 100644 index 0000000..0318cbe Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcc.a differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcc_eh.a b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcc_eh.a new file mode 100644 index 0000000..e35687e Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcc_eh.a differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcov.a b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcov.a new file mode 100644 index 0000000..1d390fa Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgcov.a differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgomp.a b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgomp.a new file mode 100644 index 0000000..d9081a3 Binary files /dev/null and b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgomp.a differ diff --git a/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgomp.spec b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgomp.spec new file mode 100644 index 0000000..7102255 --- /dev/null +++ b/lib/gcc/i686-apple-darwin10/4.2.1/x86_64/libgomp.spec @@ -0,0 +1,3 @@ +# This spec file is read by gcc when linking. It is used to specify the +# standard libraries we need in order to link with -fopenmp. +*link_gomp: -lgomp %{static: } diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/crt3.o b/lib/gcc/i686-apple-darwin11/4.2.1/crt3.o new file mode 100644 index 0000000..8609d11 Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/crt3.o differ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/README b/lib/gcc/i686-apple-darwin11/4.2.1/include/README new file mode 100644 index 0000000..7086a77 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/README @@ -0,0 +1,14 @@ +This README file is copied into the directory for GCC-only header files +when fixincludes is run by the makefile for GCC. + +Many of the files in this directory were automatically edited from the +standard system header files by the fixincludes process. They are +system-specific, and will not work on any other kind of system. They +are also not part of GCC. The reason we have to do this is because +GCC requires ANSI C headers and many vendors supply ANSI-incompatible +headers. + +Because this is an automated process, sometimes headers get "fixed" +that do not, strictly speaking, need a fix. As long as nothing is broken +by the process, it is just an unfortunate collateral inconvenience. +We would like to rectify it, if it is not "too inconvenient". diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/ammintrin.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/ammintrin.h new file mode 100644 index 0000000..8a466d9 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/ammintrin.h @@ -0,0 +1,106 @@ +/* APPLE LOCAL file 5612787 mainline sse4 */ +/* Copyright (C) 2007 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the AMD Programmers + Manual Update, version 2.x */ + +#ifndef _AMMINTRIN_H_INCLUDED +#define _AMMINTRIN_H_INCLUDED + +#ifndef __SSE4A__ +# error "SSE4A instruction set not enabled" +#else + +/* We need definitions from the SSE3, SSE2 and SSE header files*/ +#include + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +__STATIC_INLINE void __attribute__((__always_inline__)) +_mm_stream_sd (double * __P, __m128d __Y) +{ + __builtin_ia32_movntsd (__P, (__v2df) __Y); +} + +__STATIC_INLINE void __attribute__((__always_inline__)) +_mm_stream_ss (float * __P, __m128 __Y) +{ + __builtin_ia32_movntss (__P, (__v4sf) __Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_extract_si64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_extrq ((__v2di) __X, (__v16qi) __Y); +} + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L) +{ + return (__m128i) __builtin_ia32_extrqi ((__v2di) __X, __I, __L); +} +#else +#define _mm_extracti_si64(X, I, L) \ + ((__m128i) __builtin_ia32_extrqi ((__v2di)(X), I, L)) +#endif + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_insert_si64 (__m128i __X,__m128i __Y) +{ + return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y); +} + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L) +{ + return (__m128i) __builtin_ia32_insertqi ((__v2di)__X, (__v2di)__Y, __I, __L); +} +#else +#define _mm_inserti_si64(X, Y, I, L) \ + ((__m128i) __builtin_ia32_insertqi ((__v2di)(X), (__v2di)(Y), I, L)) +#endif + +#endif /* __SSE4A__ */ + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +#endif /* _AMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/decfloat.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/decfloat.h new file mode 100644 index 0000000..03e0a7b --- /dev/null +++ 
b/lib/gcc/i686-apple-darwin11/4.2.1/include/decfloat.h @@ -0,0 +1,108 @@ +/* Copyright (C) 2005 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * Draft C Extension to support decimal floating-pointing arithmetic: + * Characteristics of decimal floating types + */ + +#ifndef _DECFLOAT_H___ +#define _DECFLOAT_H___ + +/* Number of base-FLT_RADIX digits in the significand, p. */ +#undef DEC32_MANT_DIG +#undef DEC64_MANT_DIG +#undef DEC128_MANT_DIG +#define DEC32_MANT_DIG __DEC32_MANT_DIG__ +#define DEC64_MANT_DIG __DEC64_MANT_DIG__ +#define DEC128_MANT_DIG __DEC128_MANT_DIG__ + +/* Minimum exponent. */ +#undef DEC32_MIN_EXP +#undef DEC64_MIN_EXP +#undef DEC128_MIN_EXP +#define DEC32_MIN_EXP __DEC32_MIN_EXP__ +#define DEC64_MIN_EXP __DEC64_MIN_EXP__ +#define DEC128_MIN_EXP __DEC128_MIN_EXP__ + +/* Maximum exponent. */ +#undef DEC32_MAX_EXP +#undef DEC64_MAX_EXP +#undef DEC128_MAX_EXP +#define DEC32_MAX_EXP __DEC32_MAX_EXP__ +#define DEC64_MAX_EXP __DEC64_MAX_EXP__ +#define DEC128_MAX_EXP __DEC128_MAX_EXP__ + +/* Maximum representable finite decimal floating-point number + (there are 6, 15, and 33 9s after the decimal points respectively). */ +#undef DEC32_MAX +#undef DEC64_MAX +#undef DEC128_MAX +#define DEC32_MAX __DEC32_MAX__ +#define DEC64_MAX __DEC64_MAX__ +#define DEC128_MAX __DEC128_MAX__ + +/* The difference between 1 and the least value greater than 1 that is + representable in the given floating point type. */ +#undef DEC32_EPSILON +#undef DEC64_EPSILON +#undef DEC128_EPSILON +#define DEC32_EPSILON __DEC32_EPSILON__ +#define DEC64_EPSILON __DEC64_EPSILON__ +#define DEC128_EPSILON __DEC128_EPSILON__ + +/* Minimum normalized positive floating-point number. */ +#undef DEC32_MIN +#undef DEC64_MIN +#undef DEC128_MIN +#define DEC32_MIN __DEC32_MIN__ +#define DEC64_MIN __DEC64_MIN__ +#define DEC128_MIN __DEC128_MIN__ + +/* Minimum denormalized positive floating-point number. */ +#undef DEC32_DEN +#undef DEC64_DEN +#undef DEC128_DEN +#define DEC32_DEN __DEC32_DEN__ +#define DEC64_DEN __DEC64_DEN__ +#define DEC128_DEN __DEC128_DEN__ + +/* The floating-point expression evaluation method. 
+ -1 indeterminate + 0 evaluate all operations and constants just to the range and + precision of the type + 1 evaluate operations and constants of type _Decimal32 + and _Decimal64 to the range and precision of the _Decimal64 + type, evaluate _Decimal128 operations and constants to the + range and precision of the _Decimal128 type; + 2 evaluate all operations and constants to the range and + precision of the _Decimal128 type. +*/ + +#undef DECFLT_EVAL_METHOD +#define DECFLT_EVAL_METHOD __DECFLT_EVAL_METHOD__ + +#endif /* _DECFLOAT_H___ */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/emmintrin.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/emmintrin.h new file mode 100644 index 0000000..857ea6f --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/emmintrin.h @@ -0,0 +1,1981 @@ +/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */ +/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _EMMINTRIN_H_INCLUDED +#define _EMMINTRIN_H_INCLUDED + +#ifdef __SSE2__ +#include + +/* SSE2 */ +typedef double __v2df __attribute__ ((__vector_size__ (16))); +typedef long long __v2di __attribute__ ((__vector_size__ (16))); +typedef int __v4si __attribute__ ((__vector_size__ (16))); +typedef short __v8hi __attribute__ ((__vector_size__ (16))); +typedef char __v16qi __attribute__ ((__vector_size__ (16))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__)); +typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); + +/* Create a selector for use with the SHUFPD instruction. */ +#define _MM_SHUFFLE2(fp1,fp0) \ + (((fp1) << 1) | (fp0)) + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* APPLE LOCAL begin radar 4152603 */ +/* Create a vector with element 0 as F and the rest zero. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_sd (double __F) +{ + return __extension__ (__m128d){ __F, 0 }; +} + +/* Create a vector with both elements equal to F. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_pd (double __F) +{ + return __extension__ (__m128d){ __F, __F }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_pd1 (double __F) +{ + return _mm_set1_pd (__F); +} + +/* Create a vector with the lower value X and upper value W. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_pd (double __W, double __X) +{ + return __extension__ (__m128d){ __X, __W }; +} + +/* Create a vector with the lower value W and upper value X. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_pd (double __W, double __X) +{ + return __extension__ (__m128d){ __W, __X }; +} + +/* Create a vector of zeros. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setzero_pd (void) +{ + return __extension__ (__m128d){ 0.0, 0.0 }; +} + +/* Sets the low DPFP value of A from the low value of B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_move_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B); +} + +/* Load two DPFP values from P. The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_pd (double const *__P) +{ + return *(__m128d *)__P; +} + +/* Load two DPFP values from P. The address need not be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadu_pd (double const *__P) +{ + return __builtin_ia32_loadupd (__P); +} + +/* Create a vector with all two elements equal to *P. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load1_pd (double const *__P) +{ + return _mm_set1_pd (*__P); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_sd (double const *__P) +{ + return _mm_set_sd (*__P); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_pd1 (double const *__P) +{ + return _mm_load1_pd (__P); +} + +/* Load two DPFP values in reverse order. The address must be aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadr_pd (double const *__P) +{ + __m128d __tmp = _mm_load_pd (__P); + return __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1)); +} + +/* Store two DPFP values. 
The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_pd (double *__P, __m128d __A) +{ + *(__m128d *)__P = __A; +} + +/* Store two DPFP values. The address need not be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storeu_pd (double *__P, __m128d __A) +{ + __builtin_ia32_storeupd (__P, __A); +} + +/* Stores the lower DPFP value. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_sd (double *__P, __m128d __A) +{ + *__P = __builtin_ia32_vec_ext_v2df (__A, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE double __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsd_f64 (__m128d __A) +{ + return __builtin_ia32_vec_ext_v2df (__A, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storel_pd (double *__P, __m128d __A) +{ + _mm_store_sd (__P, __A); +} + +/* Stores the upper DPFP value. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storeh_pd (double *__P, __m128d __A) +{ + *__P = __builtin_ia32_vec_ext_v2df (__A, 1); +} + +/* Store the lower DPFP value across two words. + The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store1_pd (double *__P, __m128d __A) +{ + _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,0))); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_pd1 (double *__P, __m128d __A) +{ + _mm_store1_pd (__P, __A); +} + +/* Store two DPFP values in reverse order. The address must be aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storer_pd (double *__P, __m128d __A) +{ + _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,1))); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi128_si32 (__m128i __A) +{ + return __builtin_ia32_vec_ext_v4si ((__v4si)__A, 0); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi128_si64 (__m128i __A) +{ + return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0); +} + +/* Microsoft intrinsic. 
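/* Editor's illustration, not part of the patched header: summing an array
   of doubles two at a time with the SSE2 load, add and store intrinsics
   from this header.  The helper name is hypothetical; N is assumed even,
   no particular alignment is required (the unaligned forms are used), and
   an SSE2-capable target is assumed.  */

#include <emmintrin.h>

static double sum_pd (const double *p, int n)
{
  __m128d acc = _mm_setzero_pd ();
  double tmp[2];
  int i;

  for (i = 0; i < n; i += 2)
    acc = _mm_add_pd (acc, _mm_loadu_pd (p + i));

  _mm_storeu_pd (tmp, acc);
  return tmp[0] + tmp[1];
}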
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi128_si64x (__m128i __A) +{ + return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0); +} +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_div_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_div_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sqrt_pd (__m128d __A) +{ + return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A); +} + +/* Return pair {sqrt (A[0), B[1]}. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sqrt_sd (__m128d __A, __m128d __B) +{ + __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B); + return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_and_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_andnot_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_or_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_xor_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmple_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpge_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpgepd 
((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpneq_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnlt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnle_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpngt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnge_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpord_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpunord_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmple_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpltsd ((__v2df) __B, + (__v2df) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpge_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmplesd ((__v2df) __B, + (__v2df) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpneq_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d 
__attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnlt_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnle_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpngt_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpnltsd ((__v2df) __B, + (__v2df) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnge_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpnlesd ((__v2df) __B, + (__v2df) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpord_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpunord_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comieq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comilt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comile_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comigt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comige_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comineq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomieq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomilt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B); +} + +/* APPLE 
LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomile_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomigt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomige_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomineq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B); +} + +/* Create a vector of Qi, where i is the element number. */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_epi64x (long long __q1, long long __q0) +{ + return __extension__ (__m128i)(__v2di){ __q0, __q1 }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_epi64 (__m64 __q1, __m64 __q0) +{ + return _mm_set_epi64x ((long long)__q1, (long long)__q0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0) +{ + return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4, + short __q3, short __q2, short __q1, short __q0) +{ + return __extension__ (__m128i)(__v8hi){ + __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12, + char __q11, char __q10, char __q09, char __q08, + char __q07, char __q06, char __q05, char __q04, + char __q03, char __q02, char __q01, char __q00) +{ + return __extension__ (__m128i)(__v16qi){ + __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15 + }; +} + +/* APPLE LOCAL begin 4220129 */ +/* functions moved to end of file */ +/* APPLE LOCAL end 4220129 */ + +/* Create a vector of Qi, where i is the element number. + The parameter order is reversed from the _mm_set_epi* functions. 
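
For illustration only (this snippet is not part of the patched header): with the constructors above, element i of the result holds argument Qi, so the last argument lands in the low element; the _mm_setr_epi32 variant defined just below simply takes its arguments in the opposite order. The sketch assumes <emmintrin.h> is included and SSE2 code generation is enabled (e.g. -msse2).

#include <emmintrin.h>
#include <stdio.h>

int main (void)
{
  __m128i a = _mm_set_epi32  (3, 2, 1, 0);   /* low-to-high elements: 0, 1, 2, 3 */
  __m128i b = _mm_setr_epi32 (0, 1, 2, 3);   /* same vector, argument order reversed */
  int out[4];
  _mm_storeu_si128 ((__m128i *) out, a);     /* unaligned store into plain ints */
  printf ("%d %d %d %d\n", out[0], out[1], out[2], out[3]);   /* prints: 0 1 2 3 */
  _mm_storeu_si128 ((__m128i *) out, b);
  printf ("%d %d %d %d\n", out[0], out[1], out[2], out[3]);   /* prints: 0 1 2 3 */
  return 0;
}
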
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_epi64 (__m64 __q0, __m64 __q1) +{ + return _mm_set_epi64 (__q1, __q0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3) +{ + return _mm_set_epi32 (__q3, __q2, __q1, __q0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3, + short __q4, short __q5, short __q6, short __q7) +{ + return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03, + char __q04, char __q05, char __q06, char __q07, + char __q08, char __q09, char __q10, char __q11, + char __q12, char __q13, char __q14, char __q15) +{ + return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08, + __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00); +} + +/* Create a vector with element 0 as *P and the rest zero. */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_si128 (__m128i const *__P) +{ + return *__P; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadu_si128 (__m128i const *__P) +{ + return (__m128i) __builtin_ia32_loaddqu ((char const *)__P); +} + +/* APPLE LOCAL begin 4099020 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadl_epi64 (__m128i const *__P) +{ + return (__m128i)__builtin_ia32_loadlv4si ((__v2si *)__P); +} +/* APPLE LOCAL end 4099020 */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_si128 (__m128i *__P, __m128i __B) +{ + *__P = __B; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storeu_si128 (__m128i *__P, __m128i __B) +{ + __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B); +} + +/* APPLE LOCAL begin 4099020 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storel_epi64 (__m128i *__P, __m128i __B) +{ + __builtin_ia32_storelv4si ((__v2si *)__P, __B); +} +/* APPLE LOCAL end 4099020 */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movepi64_pi64 (__m128i __B) +{ + return (__m64) __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movpi64_epi64 (__m64 __A) +{ + return _mm_set_epi64 ((__m64)0LL, __A); +} + +/* APPLE LOCAL begin 4099020 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_move_epi64 (__m128i __A) +{ + return 
(__m128i)__builtin_ia32_movqv4si ((__v4si)__A) ; +} +/* APPLE LOCAL end 4099020 */ + +/* Create a vector of zeros. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setzero_si128 (void) +{ + return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtepi32_pd (__m128i __A) +{ + return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtepi32_ps (__m128i __A) +{ + return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpd_epi32 (__m128d __A) +{ + return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpd_pi32 (__m128d __A) +{ + return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpd_ps (__m128d __A) +{ + return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttpd_epi32 (__m128d __A) +{ + return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttpd_pi32 (__m128d __A) +{ + return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpi32_pd (__m64 __A) +{ + return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtps_epi32 (__m128 __A) +{ + return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttps_epi32 (__m128 __A) +{ + return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtps_pd (__m128 __A) +{ + return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsd_si32 (__m128d __A) +{ + return __builtin_ia32_cvtsd2si ((__v2df) __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsd_si64 (__m128d __A) +{ + return __builtin_ia32_cvtsd2si64 ((__v2df) __A); +} + +/* Microsoft intrinsic. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsd_si64x (__m128d __A) +{ + return __builtin_ia32_cvtsd2si64 ((__v2df) __A); +} +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttsd_si32 (__m128d __A) +{ + return __builtin_ia32_cvttsd2si ((__v2df) __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttsd_si64 (__m128d __A) +{ + return __builtin_ia32_cvttsd2si64 ((__v2df) __A); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttsd_si64x (__m128d __A) +{ + return __builtin_ia32_cvttsd2si64 ((__v2df) __A); +} +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsd_ss (__m128 __A, __m128d __B) +{ + return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi32_sd (__m128d __A, int __B) +{ + return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_sd (__m128d __A, long long __B) +{ + return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B); +} + +/* Microsoft intrinsic. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64x_sd (__m128d __A, long long __B) +{ + return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B); +} +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtss_sd (__m128d __A, __m128 __B) +{ + return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B); +} + +/* APPLE LOCAL 5814283 */ +#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)(__A), (__v2df)(__B), (__C))) + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadh_pd (__m128d __A, double const *__B) +{ + return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadl_pd (__m128d __A, double const *__B) +{ + return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movemask_pd (__m128d __A) +{ + return __builtin_ia32_movmskpd ((__v2df)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packs_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packus_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i 
__attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubb128 
((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_madd_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mullo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_su32 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_epu32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B); +} + +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_epi32 (__m128i __A, int __B) +{ + 
return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_epi64 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B); +} +#else +#define _mm_slli_epi16(__A, __B) \ + ((__m128i)__builtin_ia32_psllwi128 ((__v8hi)(__A), __B)) +#define _mm_slli_epi32(__A, __B) \ + ((__m128i)__builtin_ia32_pslldi128 ((__v8hi)(__A), __B)) +#define _mm_slli_epi64(__A, __B) \ + ((__m128i)__builtin_ia32_psllqi128 ((__v8hi)(__A), __B)) +#endif + +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srai_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srai_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B); +} +#else +#define _mm_srai_epi16(__A, __B) \ + ((__m128i)__builtin_ia32_psrawi128 ((__v8hi)(__A), __B)) +#define _mm_srai_epi32(__A, __B) \ + ((__m128i)__builtin_ia32_psradi128 ((__v8hi)(__A), __B)) +#endif + +#if 0 +static __m128i __attribute__((__always_inline__)) +_mm_srli_si128 (__m128i __A, int __B) +{ + return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B * 8)); +} + +static __m128i __attribute__((__always_inline__)) +_mm_srli_si128 (__m128i __A, int __B) +{ + return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B * 8)); +} +#else +/* APPLE LOCAL begin 5919583 */ +#define _mm_srli_si128 (__m128i)__builtin_ia32_psrldqi128_byteshift +#define _mm_slli_si128 (__m128i)__builtin_ia32_pslldqi128_byteshift +/* APPLE LOCAL end 5919583 */ +#endif + +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_epi64 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B); +} +#else +#define _mm_srli_epi16(__A, __B) \ + ((__m128i)__builtin_ia32_psrlwi128 ((__v8hi)(__A), __B)) +#define _mm_srli_epi32(__A, __B) \ + ((__m128i)__builtin_ia32_psrldi128 ((__v4si)(__A), __B)) +#define _mm_srli_epi64(__A, __B) \ + ((__m128i)__builtin_ia32_psrlqi128 ((__v4si)(__A), __B)) +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sll_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllw128((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sll_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pslld128((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ 
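
A brief usage sketch for the shift forms above (illustrative only, not part of the header; assumes SSE2 is enabled): the _mm_slli_epi32 / _mm_srli_epi32 family shifts each lane by a bit count, while _mm_slli_si128 / _mm_srli_si128 shift the whole 128-bit register by a byte count.

#include <emmintrin.h>
#include <stdio.h>

int main (void)
{
  __m128i v     = _mm_set_epi32 (0x40, 0x30, 0x20, 0x10);
  __m128i lanes = _mm_slli_epi32 (v, 4);     /* each 32-bit lane shifted left by 4 bits */
  __m128i bytes = _mm_srli_si128 (v, 4);     /* whole register shifted right by 4 bytes */
  int a[4], b[4];
  _mm_storeu_si128 ((__m128i *) a, lanes);
  _mm_storeu_si128 ((__m128i *) b, bytes);
  printf ("%x %x %x %x\n", a[0], a[1], a[2], a[3]);   /* 100 200 300 400 */
  printf ("%x %x %x %x\n", b[0], b[1], b[2], b[3]);   /* 20 30 40 0 */
  return 0;
}
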
+_mm_sll_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllq128((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sra_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sra_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_and_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_andnot_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_or_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_xor_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i 
__attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B); +} + +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_extract_epi16 (__m128i const __A, int const __N) +{ + return __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, __N); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_insert_epi16 (__m128i const __A, int const __D, int const __N) +{ + return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N); +} +#else +#define _mm_extract_epi16(A, N) \ + ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N))) +#define _mm_insert_epi16(A, D, N) \ + ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N))) +#endif + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movemask_epi8 (__m128i __A) +{ + return __builtin_ia32_pmovmskb128 ((__v16qi)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhi_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin 5814283 */ +#define 
_mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)(__A), __B)) +#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)(__A), __B)) +#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)(__A), __B)) +/* APPLE LOCAL end 5814283 */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C) +{ + __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_avg_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_avg_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sad_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_stream_si32 (int *__A, int __B) +{ + __builtin_ia32_movnti (__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_stream_si128 (__m128i *__A, __m128i __B) +{ + __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_stream_pd (double *__A, __m128d __B) +{ + __builtin_ia32_movntpd (__A, (__v2df)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_clflush (void const *__A) +{ + __builtin_ia32_clflush (__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_lfence (void) +{ + __builtin_ia32_lfence (); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mfence (void) +{ + __builtin_ia32_mfence (); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi32_si128 (int __A) +{ + return _mm_set_epi32 (0, 0, 0, __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_si128 (long long __A) +{ + return _mm_set_epi64x (0, __A); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64x_si128 (long long __A) +{ + return _mm_set_epi64x (0, __A); +} +#endif + +/* Casts between various SP, DP, INT vector types. Note that these do no + conversion of values, they just change the type. 
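
To make that distinction concrete (an illustrative sketch, not part of the header): the _mm_cast* functions that follow reinterpret the same 128 bits under a new element type, whereas a conversion intrinsic such as _mm_cvtps_epi32, defined earlier in this header, produces new integer values.

#include <emmintrin.h>
#include <stdio.h>

int main (void)
{
  __m128 f = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
  __m128i as_bits   = _mm_castps_si128 (f);  /* reinterpret: identical 128 bits */
  __m128i as_values = _mm_cvtps_epi32 (f);   /* convert: rounds each float to int */
  int bits[4], vals[4];
  _mm_storeu_si128 ((__m128i *) bits, as_bits);
  _mm_storeu_si128 ((__m128i *) vals, as_values);
  printf ("%08x vs %d\n", bits[0], vals[0]);  /* 3f800000 vs 1 */
  return 0;
}
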
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castpd_ps(__m128d __A) +{ + return (__m128) __A; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castpd_si128(__m128d __A) +{ + return (__m128i) __A; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castps_pd(__m128 __A) +{ + return (__m128d) __A; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castps_si128(__m128 __A) +{ + return (__m128i) __A; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castsi128_ps(__m128i __A) +{ + return (__m128) __A; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_castsi128_pd(__m128i __A) +{ + return (__m128d) __A; +} +/* APPLE LOCAL end radar 4152603 */ + +/* APPLE LOCAL begin 4220129, 4286110 */ +/* Set all of the elements of the vector to A. */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_epi64x (long long __A) +{ + return _mm_set_epi64x (__A, __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_epi64 (__m64 __A) +{ + return _mm_set_epi64 (__A, __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_epi32 (int __A) +{ + return _mm_set_epi32 (__A, __A, __A, __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_epi16 (short __A) +{ + __m128i temp, temp2, temp3; + temp = _mm_cvtsi32_si128((int)__A); + temp2 = _mm_unpacklo_epi16(temp, temp); + temp3 = _mm_shuffle_epi32(temp2, 0); + return temp3; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_epi8 (char __A) +{ + __m128i temp, temp2, temp3, temp4; + temp = _mm_cvtsi32_si128 ((int)__A); + temp2 = _mm_unpacklo_epi8 (temp, temp); + temp3 = _mm_unpacklo_epi8 (temp2, temp2); + temp4 = _mm_shuffle_epi32 (temp3, 0); + return temp4; +} +/* APPLE LOCAL end 4220129, 4286110 */ + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +#endif /* __SSE2__ */ + +#endif /* _EMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/fenv.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/fenv.h new file mode 100644 index 0000000..55f9eaf --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/fenv.h @@ -0,0 +1,18 @@ +/* DO NOT EDIT THIS FILE. + + It has been auto-edited by fixincludes from: + + "/usr/include/fenv.h" + + This had to be done to correct non-standard usages in the + original, manufacturer supplied header file. */ + +/* This file is a backwards compability hack to allow + no-trapping-math to be the default. 
*/ +#ifndef _DARWIN_FENV_H_WRAPPER +#if defined(__GNUC__) && __GNUC__ >= 4 +#pragma GCC fenv +#endif +#include_next +#define _DARWIN_FENV_H_WRAPPER +#endif /* _DARWIN_FENV_H_WRAPPER */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/float.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/float.h new file mode 100644 index 0000000..1337f6b --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/float.h @@ -0,0 +1,164 @@ +/* Copyright (C) 2002 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 5.2.4.2.2 Characteristics of floating types + */ + +#ifndef _FLOAT_H___ +#define _FLOAT_H___ + +/* Radix of exponent representation, b. */ +#undef FLT_RADIX +#define FLT_RADIX __FLT_RADIX__ + +/* Number of base-FLT_RADIX digits in the significand, p. */ +#undef FLT_MANT_DIG +#undef DBL_MANT_DIG +#undef LDBL_MANT_DIG +#define FLT_MANT_DIG __FLT_MANT_DIG__ +#define DBL_MANT_DIG __DBL_MANT_DIG__ +#define LDBL_MANT_DIG __LDBL_MANT_DIG__ + +/* Number of decimal digits, q, such that any floating-point number with q + decimal digits can be rounded into a floating-point number with p radix b + digits and back again without change to the q decimal digits, + + p * log10(b) if b is a power of 10 + floor((p - 1) * log10(b)) otherwise +*/ +#undef FLT_DIG +#undef DBL_DIG +#undef LDBL_DIG +#define FLT_DIG __FLT_DIG__ +#define DBL_DIG __DBL_DIG__ +#define LDBL_DIG __LDBL_DIG__ + +/* Minimum int x such that FLT_RADIX**(x-1) is a normalized float, emin */ +#undef FLT_MIN_EXP +#undef DBL_MIN_EXP +#undef LDBL_MIN_EXP +#define FLT_MIN_EXP __FLT_MIN_EXP__ +#define DBL_MIN_EXP __DBL_MIN_EXP__ +#define LDBL_MIN_EXP __LDBL_MIN_EXP__ + +/* Minimum negative integer such that 10 raised to that power is in the + range of normalized floating-point numbers, + + ceil(log10(b) * (emin - 1)) +*/ +#undef FLT_MIN_10_EXP +#undef DBL_MIN_10_EXP +#undef LDBL_MIN_10_EXP +#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__ +#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__ +#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__ + +/* Maximum int x such that FLT_RADIX**(x-1) is a representable float, emax. 
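
These macros are ordinarily consumed directly by portable code; a minimal sketch (not part of the header) that prints a few of the values defined above:

#include <float.h>
#include <stdio.h>

int main (void)
{
  printf ("radix %d, float mantissa digits %d, float decimal digits %d\n",
          FLT_RADIX, FLT_MANT_DIG, FLT_DIG);
  printf ("double decimal digits %d, minimum normal double exponent %d\n",
          DBL_DIG, DBL_MIN_EXP);
  return 0;
}
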
*/ +#undef FLT_MAX_EXP +#undef DBL_MAX_EXP +#undef LDBL_MAX_EXP +#define FLT_MAX_EXP __FLT_MAX_EXP__ +#define DBL_MAX_EXP __DBL_MAX_EXP__ +#define LDBL_MAX_EXP __LDBL_MAX_EXP__ + +/* Maximum integer such that 10 raised to that power is in the range of + representable finite floating-point numbers, + + floor(log10((1 - b**-p) * b**emax)) +*/ +#undef FLT_MAX_10_EXP +#undef DBL_MAX_10_EXP +#undef LDBL_MAX_10_EXP +#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__ +#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__ +#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__ + +/* Maximum representable finite floating-point number, + + (1 - b**-p) * b**emax +*/ +#undef FLT_MAX +#undef DBL_MAX +#undef LDBL_MAX +#define FLT_MAX __FLT_MAX__ +#define DBL_MAX __DBL_MAX__ +#define LDBL_MAX __LDBL_MAX__ + +/* The difference between 1 and the least value greater than 1 that is + representable in the given floating point type, b**1-p. */ +#undef FLT_EPSILON +#undef DBL_EPSILON +#undef LDBL_EPSILON +#define FLT_EPSILON __FLT_EPSILON__ +#define DBL_EPSILON __DBL_EPSILON__ +#define LDBL_EPSILON __LDBL_EPSILON__ + +/* Minimum normalized positive floating-point number, b**(emin - 1). */ +#undef FLT_MIN +#undef DBL_MIN +#undef LDBL_MIN +#define FLT_MIN __FLT_MIN__ +#define DBL_MIN __DBL_MIN__ +#define LDBL_MIN __LDBL_MIN__ + +/* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown. */ +/* APPLE LOCAL begin 3399553 */ +/* This changes with calls to fesetround in . */ +#undef FLT_ROUNDS +#define FLT_ROUNDS (__builtin_flt_rounds ()) +/* APPLE LOCAL end 3399553 */ + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +/* The floating-point expression evaluation method. + -1 indeterminate + 0 evaluate all operations and constants just to the range and + precision of the type + 1 evaluate operations and constants of type float and double + to the range and precision of the double type, evaluate + long double operations and constants to the range and + precision of the long double type + 2 evaluate all operations and constants to the range and + precision of the long double type + + ??? This ought to change with the setting of the fp control word; + the value provided by the compiler assumes the widest setting. */ +#undef FLT_EVAL_METHOD +#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__ + +/* Number of decimal digits, n, such that any floating-point number in the + widest supported floating type with pmax radix b digits can be rounded + to a floating-point number with n decimal digits and back again without + change to the value, + + pmax * log10(b) if b is a power of 10 + ceil(1 + pmax * log10(b)) otherwise +*/ +#undef DECIMAL_DIG +#define DECIMAL_DIG __DECIMAL_DIG__ + +#endif /* C99 */ +#endif /* _FLOAT_H___ */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/iso646.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/iso646.h new file mode 100644 index 0000000..445d372 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/iso646.h @@ -0,0 +1,48 @@ +/* Copyright (C) 1997, 1999 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 7.9 Alternative spellings + */ + +#ifndef _ISO646_H +#define _ISO646_H + +#ifndef __cplusplus +#define and && +#define and_eq &= +#define bitand & +#define bitor | +#define compl ~ +#define not ! +#define not_eq != +#define or || +#define or_eq |= +#define xor ^ +#define xor_eq ^= +#endif + +#endif diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/limits.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/limits.h new file mode 100644 index 0000000..16417a2 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/limits.h @@ -0,0 +1,118 @@ +/* This administrivia gets added to the beginning of limits.h + if the system has its own version of limits.h. */ + +/* APPLE LOCAL begin 4401222 */ +#ifndef _LIBC_LIMITS_H_ +/* Use "..." so that we find syslimits.h only in this same directory. */ +#include "syslimits.h" +#endif +#ifdef _GCC_NEXT_LIMITS_H +#include_next +#undef _GCC_NEXT_LIMITS_H +#endif +/* APPLE LOCAL end 4401222 */ +#ifndef _LIMITS_H___ +#define _LIMITS_H___ + +/* Number of bits in a `char'. */ +#undef CHAR_BIT +#define CHAR_BIT __CHAR_BIT__ + +/* Maximum length of a multibyte character. */ +#ifndef MB_LEN_MAX +#define MB_LEN_MAX 1 +#endif + +/* Minimum and maximum values a `signed char' can hold. */ +#undef SCHAR_MIN +#define SCHAR_MIN (-SCHAR_MAX - 1) +#undef SCHAR_MAX +#define SCHAR_MAX __SCHAR_MAX__ + +/* Maximum value an `unsigned char' can hold. (Minimum is 0). */ +#undef UCHAR_MAX +#if __SCHAR_MAX__ == __INT_MAX__ +# define UCHAR_MAX (SCHAR_MAX * 2U + 1U) +#else +# define UCHAR_MAX (SCHAR_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `char' can hold. */ +#ifdef __CHAR_UNSIGNED__ +# undef CHAR_MIN +# if __SCHAR_MAX__ == __INT_MAX__ +# define CHAR_MIN 0U +# else +# define CHAR_MIN 0 +# endif +# undef CHAR_MAX +# define CHAR_MAX UCHAR_MAX +#else +# undef CHAR_MIN +# define CHAR_MIN SCHAR_MIN +# undef CHAR_MAX +# define CHAR_MAX SCHAR_MAX +#endif + +/* Minimum and maximum values a `signed short int' can hold. */ +#undef SHRT_MIN +#define SHRT_MIN (-SHRT_MAX - 1) +#undef SHRT_MAX +#define SHRT_MAX __SHRT_MAX__ + +/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */ +#undef USHRT_MAX +#if __SHRT_MAX__ == __INT_MAX__ +# define USHRT_MAX (SHRT_MAX * 2U + 1U) +#else +# define USHRT_MAX (SHRT_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `signed int' can hold. */ +#undef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#undef INT_MAX +#define INT_MAX __INT_MAX__ + +/* Maximum value an `unsigned int' can hold. (Minimum is 0). */ +#undef UINT_MAX +#define UINT_MAX (INT_MAX * 2U + 1U) + +/* Minimum and maximum values a `signed long int' can hold. + (Same as `int'). */ +#undef LONG_MIN +#define LONG_MIN (-LONG_MAX - 1L) +#undef LONG_MAX +#define LONG_MAX __LONG_MAX__ + +/* Maximum value an `unsigned long int' can hold. (Minimum is 0). 
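
A small combined sketch (illustrative only, compiled as C, not part of either header) of the iso646.h spellings together with the limits.h values defined above:

#include <iso646.h>
#include <limits.h>
#include <stdio.h>

int main (void)
{
  int x = 7;
  if (x > 0 and x not_eq INT_MAX)   /* iso646.h spellings of && and != */
    printf ("CHAR_BIT=%d INT_MAX=%d UINT_MAX=%u\n", CHAR_BIT, INT_MAX, UINT_MAX);
  return 0;
}
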
*/ +#undef ULONG_MAX +#define ULONG_MAX (LONG_MAX * 2UL + 1UL) + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LLONG_MIN +# define LLONG_MIN (-LLONG_MAX - 1LL) +# undef LLONG_MAX +# define LLONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +# undef ULLONG_MAX +# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) +#endif + +#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__) +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LONG_LONG_MIN +# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL) +# undef LONG_LONG_MAX +# define LONG_LONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +# undef ULONG_LONG_MAX +# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL) +#endif + +#endif /* _LIMITS_H___ */ +/* APPLE LOCAL begin 4401222 */ +/* APPLE LOCAL end 4401222 */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/mm3dnow.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/mm3dnow.h new file mode 100644 index 0000000..7fdc6dc --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/mm3dnow.h @@ -0,0 +1,220 @@ +/* Copyright (C) 2004 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the mm3dnow.h (of supposedly AMD origin) included with + MSVC 7.1. */ + +#ifndef _MM3DNOW_H_INCLUDED +#define _MM3DNOW_H_INCLUDED + +#ifdef __3dNOW__ + +#include + +/* Internal data types for implementing the intrinsics. 
*/ +typedef float __v2sf __attribute__ ((__vector_size__ (8))); + +static __inline void +_m_femms (void) +{ + __builtin_ia32_femms(); +} + +static __inline __m64 +_m_pavgusb (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pavgusb ((__v8qi)__A, (__v8qi)__B); +} + +static __inline __m64 +_m_pf2id (__m64 __A) +{ + return (__m64)__builtin_ia32_pf2id ((__v2sf)__A); +} + +static __inline __m64 +_m_pfacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfacc ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfadd (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfadd ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfcmpeq (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpeq ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfcmpge (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpge ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfcmpgt (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpgt ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfmax (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmax ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfmin (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmin ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfmul (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmul ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfrcp (__m64 __A) +{ + return (__m64)__builtin_ia32_pfrcp ((__v2sf)__A); +} + +static __inline __m64 +_m_pfrcpit1 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrcpit1 ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfrcpit2 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrcpit2 ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfrsqrt (__m64 __A) +{ + return (__m64)__builtin_ia32_pfrsqrt ((__v2sf)__A); +} + +static __inline __m64 +_m_pfrsqit1 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrsqit1 ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfsub (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfsub ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfsubr (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfsubr ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pi2fd (__m64 __A) +{ + return (__m64)__builtin_ia32_pi2fd ((__v2si)__A); +} + +static __inline __m64 +_m_pmulhrw (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pmulhrw ((__v4hi)__A, (__v4hi)__B); +} + +static __inline void +_m_prefetch (void *__P) +{ + __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */); +} + +static __inline void +_m_prefetchw (void *__P) +{ + __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */); +} + +static __inline __m64 +_m_from_float (float __A) +{ + return (__m64)(__v2sf){ __A, 0 }; +} + +static __inline float +_m_to_float (__m64 __A) +{ + union { __v2sf v; float a[2]; } __tmp = { (__v2sf)__A }; + return __tmp.a[0]; +} + +#ifdef __3dNOW_A__ + +static __inline __m64 +_m_pf2iw (__m64 __A) +{ + return (__m64)__builtin_ia32_pf2iw ((__v2sf)__A); +} + +static __inline __m64 +_m_pfnacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfnacc ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pfpnacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfpnacc ((__v2sf)__A, (__v2sf)__B); +} + +static __inline __m64 +_m_pi2fw (__m64 __A) +{ + return (__m64)__builtin_ia32_pi2fw ((__v2si)__A); +} + +static __inline __m64 +_m_pswapd (__m64 __A) +{ + return 
(__m64)__builtin_ia32_pswapdsf ((__v2sf)__A); +} + +#endif /* __3dNOW_A__ */ +#endif /* __3dNOW__ */ + +#endif /* _MM3DNOW_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/mm_malloc.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/mm_malloc.h new file mode 100644 index 0000000..20d7f5e --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/mm_malloc.h @@ -0,0 +1,77 @@ +/* Copyright (C) 2004 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +#ifndef _MM_MALLOC_H_INCLUDED +#define _MM_MALLOC_H_INCLUDED + +#include <stdlib.h> +#include <errno.h> + +static __inline__ void* +_mm_malloc (size_t size, size_t align) +{ + void * malloc_ptr; + void * aligned_ptr; + + /* Error if align is not a power of two. */ + if (align & (align - 1)) + { + errno = EINVAL; + return ((void*) 0); + } + + if (size == 0) + return ((void *) 0); + + /* Assume malloc'd pointer is aligned at least to sizeof (void*). + If necessary, add another sizeof (void*) to store the value + returned by malloc. Effectively this enforces a minimum alignment + of sizeof double. */ + if (align < 2 * sizeof (void *)) + align = 2 * sizeof (void *); + + malloc_ptr = malloc (size + align); + if (!malloc_ptr) + return ((void *) 0); + + /* Align We have at least sizeof (void *) space below malloc'd ptr. */ + aligned_ptr = (void *) (((size_t) malloc_ptr + align) + & ~((size_t) (align) - 1)); + + /* Store the original pointer just before p. */ + ((void **) aligned_ptr) [-1] = malloc_ptr; + + return aligned_ptr; +} + +static __inline__ void +_mm_free (void * aligned_ptr) +{ + if (aligned_ptr) + free (((void **) aligned_ptr) [-1]); +} + +#endif /* _MM_MALLOC_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/mmintrin.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/mmintrin.h new file mode 100644 index 0000000..64db058 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/mmintrin.h @@ -0,0 +1,1219 @@ +/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */ +/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _MMINTRIN_H_INCLUDED +#define _MMINTRIN_H_INCLUDED + +#ifndef __MMX__ +# error "MMX instruction set not enabled" +#else +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +/* APPLE LOCAL 4505813 */ +typedef long long __m64 __attribute__ ((__vector_size__ (8), __may_alias__)); + +/* Internal data types for implementing the intrinsics. */ +typedef int __v2si __attribute__ ((__vector_size__ (8))); +typedef short __v4hi __attribute__ ((__vector_size__ (8))); +typedef char __v8qi __attribute__ ((__vector_size__ (8))); + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* Empty the multimedia state. */ +/* APPLE LOCAL begin radar 4152603 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_empty (void) +{ + __builtin_ia32_emms (); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_empty (void) +{ + _mm_empty (); +} + +/* Convert I to a __m64 object. The integer is zero-extended to 64-bits. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi32_si64 (int __i) +{ + return (__m64) __builtin_ia32_vec_init_v2si (__i, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_from_int (int __i) +{ + return _mm_cvtsi32_si64 (__i); +} + +#ifdef __x86_64__ +/* Convert I to a __m64 object. */ + +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_from_int64 (long long __i) +{ + return (__m64) __i; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_m64 (long long __i) +{ + return (__m64) __i; +} + +/* Microsoft intrinsic. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64x_si64 (long long __i) +{ + return (__m64) __i; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_pi64x (long long __i) +{ + return (__m64) __i; +} +#endif + +/* Convert the lower 32 bits of the __m64 object into an integer. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_si32 (__m64 __i) +{ + return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_to_int (__m64 __i) +{ + return _mm_cvtsi64_si32 (__i); +} + +#ifdef __x86_64__ +/* Convert the __m64 object to a 64bit integer. */ + +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_to_int64 (__m64 __i) +{ + return (long long)__i; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtm64_si64 (__m64 __i) +{ + return (long long)__i; +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_si64x (__m64 __i) +{ + return (long long)__i; +} +#endif + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with signed saturation. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packs_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_packsswb (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pi16 (__m1, __m2); +} + +/* Pack the two 32-bit values from M1 in to the lower two 16-bit values of + the result, and the two 32-bit values from M2 into the upper two 16-bit + values of the result, all with signed saturation. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packs_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_packssdw (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pi32 (__m1, __m2); +} + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with unsigned saturation. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_packs_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_packuswb (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pu16 (__m1, __m2); +} + +/* Interleave the four 8-bit values from the high half of M1 with the four + 8-bit values from the high half of M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpckhbw (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi8 (__m1, __m2); +} + +/* Interleave the two 16-bit values from the high half of M1 with the two + 16-bit values from the high half of M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpckhwd (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi16 (__m1, __m2); +} + +/* Interleave the 32-bit value from the high half of M1 with the 32-bit + value from the high half of M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpckhdq (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi32 (__m1, __m2); +} + +/* Interleave the four 8-bit values from the low half of M1 with the four + 8-bit values from the low half of M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpcklbw (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi8 (__m1, __m2); +} + +/* Interleave the two 16-bit values from the low half of M1 with the two + 16-bit values from the low half of M2. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpcklwd (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi16 (__m1, __m2); +} + +/* Interleave the 32-bit value from the low half of M1 with the 32-bit + value from the low half of M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_punpckldq (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi32 (__m1, __m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddb (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddw (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi16 (__m1, __m2); +} + +/* Add the 32-bit values in M1 to the 32-bit values in M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddd (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi32 (__m1, __m2); +} + +/* Add the 64-bit values in M1 to the 64-bit values in M2. */ +#ifdef __SSE2__ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_si64 (__m64 __m1, __m64 __m2) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_paddq (__m1, __m2); +} +#endif + +/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed + saturated arithmetic. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddsb (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pi8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed + saturated arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddsw (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pi16 (__m1, __m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned + saturated arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_pu8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddusb (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pu8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned + saturated arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_adds_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_paddusw (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pu16 (__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubb (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubw (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi16 (__m1, __m2); +} + +/* Subtract the 32-bit values in M2 from the 32-bit values in M1. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubd (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi32 (__m1, __m2); +} + +/* Add the 64-bit values in M1 to the 64-bit values in M2. */ +#ifdef __SSE2__ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_si64 (__m64 __m1, __m64 __m2) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psubq (__m1, __m2); +} +#endif + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed + saturating arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubsb (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pi8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + signed saturating arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubsw (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pi16 (__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using + unsigned saturating arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_pu8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubusb (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pu8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + unsigned saturating arithmetic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_subs_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psubusw (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pu16 (__m1, __m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing + four 32-bit intermediate results, which are then summed by pairs to + produce two 32-bit results. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_madd_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmaddwd (__m64 __m1, __m64 __m2) +{ + return _mm_madd_pi16 (__m1, __m2); +} + +/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in + M2 and produce the high 16 bits of the 32-bit results. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhi_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmulhw (__m64 __m1, __m64 __m2) +{ + return _mm_mulhi_pi16 (__m1, __m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce + the low 16 bits of the results. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mullo_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmullw (__m64 __m1, __m64 __m2) +{ + return _mm_mullo_pi16 (__m1, __m2); +} + +/* Shift four 16-bit values in M left by COUNT. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sll_pi16 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psllw (__m64 __m, __m64 __count) +{ + return _mm_sll_pi16 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_pi16 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psllwi ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psllwi (__m64 __m, int __count) +{ + return _mm_slli_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M left by COUNT. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sll_pi32 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pslld (__m64 __m, __m64 __count) +{ + return _mm_sll_pi32 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_pi32 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_pslldi ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pslldi (__m64 __m, int __count) +{ + return _mm_slli_pi32 (__m, __count); +} + +/* Shift the 64-bit value in M left by COUNT. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sll_si64 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psllq (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psllq (__m64 __m, __m64 __count) +{ + return _mm_sll_si64 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_slli_si64 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psllqi (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psllqi (__m64 __m, int __count) +{ + return _mm_slli_si64 (__m, __count); +} + +/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sra_pi16 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psraw (__m64 __m, __m64 __count) +{ + return _mm_sra_pi16 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srai_pi16 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrawi ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrawi (__m64 __m, int __count) +{ + return _mm_srai_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sra_pi32 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrad (__m64 __m, __m64 __count) +{ + return _mm_sra_pi32 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srai_pi32 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psradi ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psradi (__m64 __m, int __count) +{ + return _mm_srai_pi32 (__m, __count); +} + +/* Shift four 16-bit values in M right by COUNT; shift in zeros. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_pi16 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrlw (__m64 __m, __m64 __count) +{ + return _mm_srl_pi16 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_pi16 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrlwi ((__v4hi)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrlwi (__m64 __m, int __count) +{ + return _mm_srli_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M right by COUNT; shift in zeros. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_pi32 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrld (__m64 __m, __m64 __count) +{ + return _mm_srl_pi32 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_pi32 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrldi ((__v2si)__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrldi (__m64 __m, int __count) +{ + return _mm_srli_pi32 (__m, __count); +} + +/* Shift the 64-bit value in M left by COUNT; shift in zeros. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srl_si64 (__m64 __m, __m64 __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrlq (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrlq (__m64 __m, __m64 __count) +{ + return _mm_srl_si64 (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_srli_si64 (__m64 __m, int __count) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + return (__m64) __builtin_ia32_psrlqi (__m, __count); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psrlqi (__m64 __m, int __count) +{ + return _mm_srli_si64 (__m, __count); +} + +/* Bit-wise AND the 64-bit values in M1 and M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_and_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pand (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pand (__m64 __m1, __m64 __m2) +{ + return _mm_and_si64 (__m1, __m2); +} + +/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the + 64-bit value in M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_andnot_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pandn (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pandn (__m64 __m1, __m64 __m2) +{ + return _mm_andnot_si64 (__m1, __m2); +} + +/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_or_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_por (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_por (__m64 __m1, __m64 __m2) +{ + return _mm_or_si64 (__m1, __m2); +} + +/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_xor_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pxor (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pxor (__m64 __m1, __m64 __m2) +{ + return _mm_xor_si64 (__m1, __m2); +} + +/* Compare eight 8-bit values. The result of the comparison is 0xFF if the + test is true and zero if false. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpeqb (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi8 (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpgtb (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi8 (__m1, __m2); +} + +/* Compare four 16-bit values. The result of the comparison is 0xFFFF if + the test is true and zero if false. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpeqw (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi16 (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpgtw (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi16 (__m1, __m2); +} + +/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if + the test is true and zero if false. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpeqd (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi32 (__m1, __m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pcmpgtd (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi32 (__m1, __m2); +} + +/* Creates a 64-bit zero. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setzero_si64 (void) +{ + return (__m64)0LL; +} + +/* Creates a vector of two 32-bit values; I0 is least significant. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_pi32 (int __i1, int __i0) +{ + return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1); +} + +/* Creates a vector of four 16-bit values; W0 is least significant. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0) +{ + return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3); +} + +/* Creates a vector of eight 8-bit values; B0 is least significant. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4, + char __b3, char __b2, char __b1, char __b0) +{ + return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3, + __b4, __b5, __b6, __b7); +} + +/* Similar, but with the arguments in reverse order. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_pi32 (int __i0, int __i1) +{ + return _mm_set_pi32 (__i1, __i0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3) +{ + return _mm_set_pi16 (__w3, __w2, __w1, __w0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3, + char __b4, char __b5, char __b6, char __b7) +{ + return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); +} + +/* Creates a vector of two 32-bit values, both elements containing I. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_pi32 (int __i) +{ + return _mm_set_pi32 (__i, __i); +} + +/* Creates a vector of four 16-bit values, all elements containing W. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_pi16 (short __w) +{ + return _mm_set_pi16 (__w, __w, __w, __w); +} + +/* Creates a vector of eight 8-bit values, all elements containing B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_pi8 (char __b) +{ + return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b); +} +/* APPLE LOCAL end radar 4152603 */ + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +#endif /* __MMX__ */ +#endif /* _MMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/nmmintrin.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/nmmintrin.h new file mode 100644 index 0000000..5c0db20 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/nmmintrin.h @@ -0,0 +1,41 @@ +/* APPLE LOCAL file 5612787 mainline sse4 */ +/* Copyright (C) 2007 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 10.0. */ + +#ifndef _NMMINTRIN_H_INCLUDED +#define _NMMINTRIN_H_INCLUDED + +#ifndef __SSE4_2__ +# error "SSE4.2 instruction set not enabled" +#else +/* We just include SSE4.1 header file. */ +#include <smmintrin.h> +#endif /* __SSE4_2__ */ + +#endif /* _NMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/omp.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/omp.h new file mode 100644 index 0000000..1400282 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/omp.h @@ -0,0 +1,87 @@ +/* Copyright (C) 2005 Free Software Foundation, Inc. + Contributed by Richard Henderson <rth@redhat.com>. + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for + more details. + + You should have received a copy of the GNU Lesser General Public License + along with libgomp; see the file COPYING.LIB. If not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301, USA. */ + +/* As a special exception, if you link this library with other files, some + of which are compiled with GCC, to produce an executable, this library + does not by itself cause the resulting executable to be covered by the + GNU General Public License. This exception does not however invalidate + any other reasons why the executable file might be covered by the GNU + General Public License. */ + +#ifndef OMP_H +#define OMP_H 1 + +#ifndef _LIBGOMP_OMP_LOCK_DEFINED +#define _LIBGOMP_OMP_LOCK_DEFINED 1 +/* These two structures get edited by the libgomp build process to + reflect the shape of the two types. Their internals are private + to the library.
*/ + +typedef struct +{ + unsigned char _x[64] + __attribute__((__aligned__(8))); +} omp_lock_t; + +typedef struct +{ + unsigned char _x[72] + __attribute__((__aligned__(8))); +} omp_nest_lock_t; +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +extern void omp_set_num_threads (int); +extern int omp_get_num_threads (void); +extern int omp_get_max_threads (void); +extern int omp_get_thread_num (void); +extern int omp_get_num_procs (void); + +extern int omp_in_parallel (void); + +extern void omp_set_dynamic (int); +extern int omp_get_dynamic (void); + +extern void omp_set_nested (int); +extern int omp_get_nested (void); + +extern void omp_init_lock (omp_lock_t *); +extern void omp_destroy_lock (omp_lock_t *); +extern void omp_set_lock (omp_lock_t *); +extern void omp_unset_lock (omp_lock_t *); +extern int omp_test_lock (omp_lock_t *); + +extern void omp_init_nest_lock (omp_nest_lock_t *); +extern void omp_destroy_nest_lock (omp_nest_lock_t *); +extern void omp_set_nest_lock (omp_nest_lock_t *); +extern void omp_unset_nest_lock (omp_nest_lock_t *); +extern int omp_test_nest_lock (omp_nest_lock_t *); + +extern double omp_get_wtime (void); +extern double omp_get_wtick (void); + +#ifdef __cplusplus +} +#endif + +#endif /* OMP_H */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/pmmintrin.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/pmmintrin.h new file mode 100644 index 0000000..7640941 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/pmmintrin.h @@ -0,0 +1,172 @@ +/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */ +/* Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _PMMINTRIN_H_INCLUDED +#define _PMMINTRIN_H_INCLUDED + +#ifdef __SSE3__ +#include <xmmintrin.h> +#include <emmintrin.h> + +/* Additional bits in the MXCSR.
*/ +#define _MM_DENORMALS_ZERO_MASK 0x0040 +#define _MM_DENORMALS_ZERO_ON 0x0040 +#define _MM_DENORMALS_ZERO_OFF 0x0000 + +#define _MM_SET_DENORMALS_ZERO_MODE(mode) \ + _mm_setcsr ((_mm_getcsr () & ~_MM_DENORMALS_ZERO_MASK) | (mode)) +#define _MM_GET_DENORMALS_ZERO_MODE() \ + (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK) + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* APPLE LOCAL begin radar 4152603 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_addsub_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_addsubps ((__v4sf)__X, (__v4sf)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_haddps ((__v4sf)__X, (__v4sf)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_hsubps ((__v4sf)__X, (__v4sf)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movehdup_ps (__m128 __X) +{ + return (__m128) __builtin_ia32_movshdup ((__v4sf)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_moveldup_ps (__m128 __X) +{ + return (__m128) __builtin_ia32_movsldup ((__v4sf)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_addsub_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_addsubpd ((__v2df)__X, (__v2df)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_haddpd ((__v2df)__X, (__v2df)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_hsubpd ((__v2df)__X, (__v2df)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loaddup_pd (double const *__P) +{ + return _mm_load1_pd (__P); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movedup_pd (__m128d __X) +{ + return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_lddqu_si128 (__m128i const *__P) +{ + return (__m128i) __builtin_ia32_lddqu ((char const *)__P); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_monitor (void const * __P, 
unsigned int __E, unsigned int __H) +{ + __builtin_ia32_monitor (__P, __E, __H); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mwait (unsigned int __E, unsigned int __H) +{ + __builtin_ia32_mwait (__E, __H); +} +/* APPLE LOCAL end radar 4152603 */ +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +#endif /* __SSE3__ */ + +#endif /* _PMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/ppc_intrinsics.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/ppc_intrinsics.h new file mode 120000 index 0000000..9383ee4 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/ppc_intrinsics.h @@ -0,0 +1 @@ +../../../../../include/gcc/darwin/4.2/ppc_intrinsics.h \ No newline at end of file diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/smmintrin.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/smmintrin.h new file mode 100644 index 0000000..2da9a74 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/smmintrin.h @@ -0,0 +1,836 @@ +/* APPLE LOCAL file 5612787 mainline sse4 */ +/* Copyright (C) 2007 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 10.0. */ + +#ifndef _SMMINTRIN_H_INCLUDED +#define _SMMINTRIN_H_INCLUDED + +#ifndef __SSE4_1__ +# error "SSE4.1 instruction set not enabled" +#else + +/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header + files. */ +#include <tmmintrin.h> + +/* SSE4.1 */ + +/* Rounding mode macros.
*/ +#define _MM_FROUND_TO_NEAREST_INT 0x00 +#define _MM_FROUND_TO_NEG_INF 0x01 +#define _MM_FROUND_TO_POS_INF 0x02 +#define _MM_FROUND_TO_ZERO 0x03 +#define _MM_FROUND_CUR_DIRECTION 0x04 + +#define _MM_FROUND_RAISE_EXC 0x00 +#define _MM_FROUND_NO_EXC 0x08 + +#define _MM_FROUND_NINT \ + (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_FLOOR \ + (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_CEIL \ + (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_TRUNC \ + (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_RINT \ + (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_NEARBYINT \ + (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC) + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* Integer blend instructions - select data from 2 sources using + constant/variable mask. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X, + (__v8hi)__Y, + __M); +} +#else +#define _mm_blend_epi16(X, Y, M) \ + ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(X), (__v8hi)(Y), (M))) +#endif + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M) +{ + return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X, + (__v16qi)__Y, + (__v16qi)__M); +} + +/* Single precision floating point blend instructions - select data + from 2 sources using constant/variable mask. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_blend_ps (__m128 __X, __m128 __Y, const int __M) +{ + return (__m128) __builtin_ia32_blendps ((__v4sf)__X, + (__v4sf)__Y, + __M); +} +#else +#define _mm_blend_ps(X, Y, M) \ + ((__m128) __builtin_ia32_blendps ((__v4sf)(X), (__v4sf)(Y), (M))) +#endif + +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M) +{ + return (__m128) __builtin_ia32_blendvps ((__v4sf)__X, + (__v4sf)__Y, + (__v4sf)__M); +} + +/* Double precision floating point blend instructions - select data + from 2 sources using constant/variable mask. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +_mm_blend_pd (__m128d __X, __m128d __Y, const int __M) +{ + return (__m128d) __builtin_ia32_blendpd ((__v2df)__X, + (__v2df)__Y, + __M); +} +#else +#define _mm_blend_pd(X, Y, M) \ + ((__m128d) __builtin_ia32_blendpd ((__v2df)(X), (__v2df)(Y), (M))) +#endif + +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +_mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M) +{ + return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X, + (__v2df)__Y, + (__v2df)__M); +} + +/* Dot product instructions with mask-defined summing and zeroing parts + of result. 
*/ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_dp_ps (__m128 __X, __m128 __Y, const int __M) +{ + return (__m128) __builtin_ia32_dpps ((__v4sf)__X, + (__v4sf)__Y, + __M); +} + +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +_mm_dp_pd (__m128d __X, __m128d __Y, const int __M) +{ + return (__m128d) __builtin_ia32_dppd ((__v2df)__X, + (__v2df)__Y, + __M); +} +#else +#define _mm_dp_ps(X, Y, M) \ + ((__m128) __builtin_ia32_dpps ((__v4sf)(X), (__v4sf)(Y), (M))) + +#define _mm_dp_pd(X, Y, M) \ + ((__m128d) __builtin_ia32_dppd ((__v2df)(X), (__v2df)(Y), (M))) +#endif + +/* Packed integer 64-bit comparison, zeroing or filling with ones + corresponding parts of result. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cmpeq_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pcmpeqq ((__v2di)__X, (__v2di)__Y); +} + +/* Min/max packed integer instructions. */ + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_min_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_max_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_min_epu16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_max_epu16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_min_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_max_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_min_epu32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_max_epu32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y); +} + +/* Packed integer 32-bit multiplication with truncation of upper + halves of results. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_mullo_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X, (__v4si)__Y); +} + +/* Packed integer 32-bit multiplication of 2 pairs of operands + with two 64-bit results. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_mul_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y); +} + +/* Packed integer 128-bit bitwise comparison. Return 1 if + (__V & __M) == 0. */ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_testz_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V); +} + +/* Packed integer 128-bit bitwise comparison. Return 1 if + (__V & ~__M) == 0. */ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_testc_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestc128 ((__v2di)__M, (__v2di)__V); +} + +/* Packed integer 128-bit bitwise comparison. 
Return 1 if + (__V & __M) != 0 && (__V & ~__M) != 0. */ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_testnzc_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestnzc128 ((__v2di)__M, (__v2di)__V); +} + +/* Macros for packed integer 128-bit comparison intrinsics. */ +#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V)) + +#define _mm_test_all_ones(V) \ + _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V))) + +#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V)) + +/* Insert single precision float into packed single precision array + element selected by index N. The bits [7-6] of N define S + index, the bits [5-4] define D index, and bits [3-0] define + zeroing mask for D. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_insert_ps (__m128 __D, __m128 __S, const int __N) +{ + return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D, + (__v4sf)__S, + __N); +} +#else +#define _mm_insert_ps(D, S, N) \ + ((__m128) __builtin_ia32_insertps128 ((__v4sf)(D), (__v4sf)(S), (N))) +#endif + +/* Helper macro to create the N value for _mm_insert_ps. */ +#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M)) + +/* Extract binary representation of single precision float from packed + single precision array element of X selected by index N. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_extract_ps (__m128 __X, const int __N) +{ + union { int i; float f; } __tmp; + __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N); + return __tmp.i; +} +#else +#define _mm_extract_ps(X, N) \ + (__extension__ \ + ({ \ + union { int i; float f; } __tmp; \ + __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(X), (N)); \ + __tmp.i; \ + }) \ + ) +#endif + +/* Extract binary representation of single precision float into + D from packed single precision array element of S selected + by index N. */ +#define _MM_EXTRACT_FLOAT(D, S, N) \ + { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); } + +/* Extract specified single precision float element into the lower + part of __m128. */ +#define _MM_PICK_OUT_PS(X, N) \ + _mm_insert_ps (_mm_setzero_ps (), (X), \ + _MM_MK_INSERTPS_NDX ((N), 0, 0x0e)) + +/* Insert integer, S, into packed integer array element of D + selected by index N. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_insert_epi8 (__m128i __D, int __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D, + __S, __N); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_insert_epi32 (__m128i __D, int __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D, + __S, __N); +} + +#ifdef __x86_64__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_insert_epi64 (__m128i __D, long long __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D, + __S, __N); +} +#endif +#else +#define _mm_insert_epi8(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(D), (S), (N))) + +#define _mm_insert_epi32(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(D), (S), (N))) + +#ifdef __x86_64__ +#define _mm_insert_epi64(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(D), (S), (N))) +#endif +#endif + +/* Extract integer from packed integer array element of X selected by + index N. 
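+
+   A minimal sketch of the insert/extract pair (assuming -msse4.1 and user
+   code that includes <smmintrin.h>; pick_lane2 is an illustrative name,
+   and the index argument must be a compile-time constant):
+
+     int pick_lane2 (void)                // illustrative helper
+     {
+       __m128i v = _mm_set1_epi32 (7);
+       v = _mm_insert_epi32 (v, 42, 2);   // write 42 into 32-bit lane 2
+       return _mm_extract_epi32 (v, 2);   // reads back 42
+     }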
*/ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_extract_epi8 (__m128i __X, const int __N) +{ + return __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_extract_epi32 (__m128i __X, const int __N) +{ + return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N); +} + +#ifdef __x86_64__ +__STATIC_INLINE long long __attribute__((__always_inline__)) +_mm_extract_epi64 (__m128i __X, const int __N) +{ + return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N); +} +#endif +#else +#define _mm_extract_epi8(X, N) \ + __builtin_ia32_vec_ext_v16qi ((__v16qi) X, (N)) +#define _mm_extract_epi32(X, N) \ + __builtin_ia32_vec_ext_v4si ((__v4si) X, (N)) + +#ifdef __x86_64__ +#define _mm_extract_epi64(X, N) \ + ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(X), (N))) +#endif +#endif + +/* Return horizontal packed word minimum and its index in bits [15:0] + and bits [18:16] respectively. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_minpos_epu16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X); +} + +/* Packed/scalar double precision floating point rounding. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +_mm_round_pd (__m128d __V, const int __M) +{ + return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M); +} + +__STATIC_INLINE __m128d __attribute__((__always_inline__)) +_mm_round_sd(__m128d __D, __m128d __V, const int __M) +{ + return (__m128d) __builtin_ia32_roundsd ((__v2df)__D, + (__v2df)__V, + __M); +} +#else +#define _mm_round_pd(V, M) \ + ((__m128d) __builtin_ia32_roundpd ((__v2df)(V), (M))) + +#define _mm_round_sd(D, V, M) \ + ((__m128d) __builtin_ia32_roundsd ((__v2df)(D), (__v2df)(V), (M))) +#endif + +/* Packed/scalar single precision floating point rounding. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_round_ps (__m128 __V, const int __M) +{ + return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M); +} + +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +_mm_round_ss (__m128 __D, __m128 __V, const int __M) +{ + return (__m128) __builtin_ia32_roundss ((__v4sf)__D, + (__v4sf)__V, + __M); +} +#else +#define _mm_round_ps(V, M) \ + ((__m128) __builtin_ia32_roundps ((__v4sf)(V), (M))) + +#define _mm_round_ss(D, V, M) \ + ((__m128) __builtin_ia32_roundss ((__v4sf)(D), (__v4sf)(V), (M))) +#endif + +/* Macros for ceil/floor intrinsics. */ +#define _mm_ceil_pd(V) _mm_round_pd ((V), _MM_FROUND_CEIL) +#define _mm_ceil_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_CEIL) + +#define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR) +#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR) + +#define _mm_ceil_ps(V) _mm_round_ps ((V), _MM_FROUND_CEIL) +#define _mm_ceil_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_CEIL) + +#define _mm_floor_ps(V) _mm_round_ps ((V), _MM_FROUND_FLOOR) +#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR) + +/* Packed integer sign-extension. 
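+
+   For illustration (assuming -msse4.1 and user code that includes
+   <smmintrin.h>; widen_low4 is a hypothetical helper name), the
+   byte-to-dword form sign-extends the four lowest signed 8-bit elements
+   of its argument into four 32-bit lanes:
+
+     __m128i widen_low4 (__m128i v)       // illustrative helper
+     {
+       return _mm_cvtepi8_epi32 (v);
+     }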
*/ + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi8_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi16_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi8_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi32_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi16_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepi8_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X); +} + +/* Packed integer zero-extension. */ + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu8_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu16_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu8_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu32_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu16_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cvtepu8_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X); +} + +/* Pack 8 double words from 2 operands into 8 words of result with + unsigned saturation. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_packus_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y); +} + +/* Sum absolute 8-bit integer difference of adjacent groups of 4 + byte integers in the first 2 operands. Starting offsets within + operands are determined by the 3rd mask operand. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X, + (__v16qi)__Y, __M); +} +#else +#define _mm_mpsadbw_epu8(X, Y, M) \ + ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(X), (__v16qi)(Y), (M))) +#endif + +/* Load double quadword using non-temporal aligned hint. */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_stream_load_si128 (__m128i *__X) +{ + return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X); +} + +#ifdef __SSE4_2__ + +/* These macros specify the source data format. */ +#define SIDD_UBYTE_OPS 0x00 +#define SIDD_UWORD_OPS 0x01 +#define SIDD_SBYTE_OPS 0x02 +#define SIDD_SWORD_OPS 0x03 + +/* These macros specify the comparison operation. */ +#define SIDD_CMP_EQUAL_ANY 0x00 +#define SIDD_CMP_RANGES 0x04 +#define SIDD_CMP_EQUAL_EACH 0x08 +#define SIDD_CMP_EQUAL_ORDERED 0x0c + +/* These macros specify the the polarity. 
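+
+   As a sketch of how these control fields compose (assuming -msse4.2 and
+   user code that includes <smmintrin.h>; first_vowel is an illustrative
+   name, and the flags shown all happen to be the zero-valued defaults),
+   the source-format, comparison, polarity and output-selection macros are
+   OR'ed together to form the immediate operand of the _mm_cmpXstri
+   intrinsics defined in this section:
+
+     int first_vowel (__m128i text)       // index of first vowel, 16 if none
+     {
+       __m128i set = _mm_setr_epi8 ('a', 'e', 'i', 'o', 'u',
+                                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+       return _mm_cmpistri (set, text,
+                            SIDD_UBYTE_OPS | SIDD_CMP_EQUAL_ANY
+                            | SIDD_POSITIVE_POLARITY | SIDD_LEAST_SIGNIFICANT);
+     }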
*/ +#define SIDD_POSITIVE_POLARITY 0x00 +#define SIDD_NEGATIVE_POLARITY 0x10 +#define SIDD_MASKED_POSITIVE_POLARITY 0x20 +#define SIDD_MASKED_NEGATIVE_POLARITY 0x30 + +/* These macros specify the output selection in _mm_cmpXstri (). */ +#define SIDD_LEAST_SIGNIFICANT 0x00 +#define SIDD_MOST_SIGNIFICANT 0x40 + +/* These macros specify the output selection in _mm_cmpXstrm (). */ +#define SIDD_BIT_MASK 0x00 +#define SIDD_UNIT_MASK 0x40 + +/* Intrinsics for text/string processing. */ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistri (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistri128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} +#else +#define _mm_cmpistrm(X, Y, M) \ + ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(X), (__v16qi)(Y), (M))) +#define _mm_cmpistri(X, Y, M) \ + __builtin_ia32_pcmpistri128 ((__v16qi)(X), (__v16qi)(Y), (M)) + +#define _mm_cmpestrm(X, LX, Y, LY, M) \ + ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M))) +#define _mm_cmpestri(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestri128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#endif + +/* Intrinsics for text/string processing and reading values of + EFlags. 
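+
+   For example (a sketch assuming -msse4.2 and user code that includes
+   <smmintrin.h>; contains_digit is an illustrative name), the flag-reading
+   forms defined below return individual EFlags bits rather than an index
+   or mask; here the carry flag, which for an equal-any comparison is set
+   when any element of the second operand matches the set in the first:
+
+     int contains_digit (__m128i chunk)   // nonzero if chunk holds a digit
+     {
+       __m128i digits = _mm_setr_epi8 ('0', '1', '2', '3', '4',
+                                       '5', '6', '7', '8', '9',
+                                       0, 0, 0, 0, 0, 0);
+       return _mm_cmpistrc (digits, chunk,
+                            SIDD_UBYTE_OPS | SIDD_CMP_EQUAL_ANY);
+     }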
*/ + +#ifdef __OPTIMIZE__ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistra (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistria128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistric128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistro (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistrio128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistris128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistriz128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} +#else +#define _mm_cmpistra(X, Y, M) \ + __builtin_ia32_pcmpistria128 ((__v16qi)(X), (__v16qi)(Y), (M)) +#define _mm_cmpistrc(X, Y, M) \ + __builtin_ia32_pcmpistric128 ((__v16qi)(X), (__v16qi)(Y), (M)) +#define _mm_cmpistro(X, Y, M) \ + __builtin_ia32_pcmpistrio128 ((__v16qi)(X), (__v16qi)(Y), (M)) +#define _mm_cmpistrs(X, Y, M) \ + __builtin_ia32_pcmpistris128 ((__v16qi)(X), (__v16qi)(Y), (M)) +#define _mm_cmpistrz(X, Y, M) \ + __builtin_ia32_pcmpistriz128 ((__v16qi)(X), (__v16qi)(Y), (M)) + +#define _mm_cmpestra(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestria128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#define _mm_cmpestrc(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestric128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#define _mm_cmpestro(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestrio128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#define _mm_cmpestrs(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestris128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#define _mm_cmpestrz(X, LX, Y, LY, M) \ + __builtin_ia32_pcmpestriz128 ((__v16qi)(X), (int)(LX), \ + (__v16qi)(Y), (int)(LY), (M)) +#endif + +/* Packed integer 64-bit comparison, zeroing or filling with ones + corresponding parts of result. 
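+
+   A brief sketch of using the resulting element mask (assuming -msse4.2
+   and user code that includes <smmintrin.h>; max_epi64 is an illustrative
+   name): because each 64-bit lane of the comparison defined just below is
+   either all ones or all zeros, its result can drive the blend intrinsics
+   defined earlier in this file:
+
+     __m128i max_epi64 (__m128i a, __m128i b)   // signed 64-bit maximum
+     {
+       __m128i gt = _mm_cmpgt_epi64 (a, b);
+       return _mm_blendv_epi8 (b, a, gt);       // take a where a > b
+     }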
*/ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +_mm_cmpgt_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pcmpgtq ((__v2di)__X, (__v2di)__Y); +} + +/* Calculate a number of bits set to 1. */ +__STATIC_INLINE int __attribute__((__always_inline__)) +_mm_popcnt_u32 (unsigned int __X) +{ + return __builtin_popcount (__X); +} + +#ifdef __x86_64__ +__STATIC_INLINE long long __attribute__((__always_inline__)) +_mm_popcnt_u64 (unsigned long long __X) +{ + return __builtin_popcountll (__X); +} +#endif + +/* Accumulate CRC32 (polynomial 0x11EDC6F41) value. */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +_mm_crc32_u8 (unsigned int __C, unsigned char __V) +{ + return __builtin_ia32_crc32qi (__C, __V); +} + +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +_mm_crc32_u16 (unsigned int __C, unsigned short __V) +{ + return __builtin_ia32_crc32hi (__C, __V); +} + +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +_mm_crc32_u32 (unsigned int __C, unsigned int __V) +{ + return __builtin_ia32_crc32si (__C, __V); +} + +#ifdef __x86_64__ +__STATIC_INLINE unsigned long long __attribute__((__always_inline__)) +_mm_crc32_u64 (unsigned long long __C, unsigned long long __V) +{ + return __builtin_ia32_crc32di (__C, __V); +} +#endif + +#endif /* __SSE4_2__ */ + +#endif /* __SSE4_1__ */ + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +#endif /* _SMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/stdarg.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/stdarg.h new file mode 100644 index 0000000..c9ddd6b --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/stdarg.h @@ -0,0 +1,133 @@ +/* Copyright (C) 1989, 1997, 1998, 1999, 2000 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 7.15 Variable arguments + */ + +#ifndef _STDARG_H +#ifndef _ANSI_STDARG_H_ +#ifndef __need___va_list +#define _STDARG_H +#define _ANSI_STDARG_H_ +#endif /* not __need___va_list */ +#undef __need___va_list + +/* Define __gnuc_va_list. */ + +#ifndef __GNUC_VA_LIST +#define __GNUC_VA_LIST +typedef __builtin_va_list __gnuc_va_list; +#endif + +/* Define the standard macros for the user, + if this invocation was from the user program. 
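+
+   As a minimal sketch of the standard usage pattern (sum_ints is an
+   illustrative name; any C translation unit that includes <stdarg.h>
+   can use the macros defined just below this way):
+
+     int sum_ints (int count, ...)
+     {
+       va_list ap;
+       int i, total = 0;
+       va_start (ap, count);
+       for (i = 0; i < count; i++)
+         total += va_arg (ap, int);
+       va_end (ap);
+       return total;
+     }
+
+   e.g. sum_ints (3, 1, 2, 3) evaluates to 6.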
*/ +#ifdef _STDARG_H + +#define va_start(v,l) __builtin_va_start(v,l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v,l) __builtin_va_arg(v,l) +#if !defined(__STRICT_ANSI__) || __STDC_VERSION__ + 0 >= 199900L +#define va_copy(d,s) __builtin_va_copy(d,s) +#endif +#define __va_copy(d,s) __builtin_va_copy(d,s) + +/* Define va_list, if desired, from __gnuc_va_list. */ +/* We deliberately do not define va_list when called from + stdio.h, because ANSI C says that stdio.h is not supposed to define + va_list. stdio.h needs to have access to that data type, + but must not use that name. It should use the name __gnuc_va_list, + which is safe because it is reserved for the implementation. */ + +#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". */ +#undef _VA_LIST +#endif + +#ifdef _BSD_VA_LIST +#undef _BSD_VA_LIST +#endif + +#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST)) +/* SVR4.2 uses _VA_LIST for an internal alias for va_list, + so we must avoid testing it and setting it here. + SVR4 uses _VA_LIST as a flag in stdarg.h, but we should + have no conflict with that. */ +#ifndef _VA_LIST_ +#define _VA_LIST_ +#ifdef __i860__ +#ifndef _VA_LIST +#define _VA_LIST va_list +#endif +#endif /* __i860__ */ +typedef __gnuc_va_list va_list; +#ifdef _SCO_DS +#define __VA_LIST +#endif +#endif /* _VA_LIST_ */ +#else /* not __svr4__ || _SCO_DS */ + +/* The macro _VA_LIST_ is the same thing used by this file in Ultrix. + But on BSD NET2 we must not test or define or undef it. + (Note that the comments in NET 2's ansi.h + are incorrect for _VA_LIST_--see stdio.h!) */ +#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT) +/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */ +#ifndef _VA_LIST_DEFINED +/* The macro _VA_LIST is used in SCO Unix 3.2. */ +#ifndef _VA_LIST +/* The macro _VA_LIST_T_H is used in the Bull dpx2 */ +#ifndef _VA_LIST_T_H +/* The macro __va_list__ is used by BeOS. */ +#ifndef __va_list__ +typedef __gnuc_va_list va_list; +#endif /* not __va_list__ */ +#endif /* not _VA_LIST_T_H */ +#endif /* not _VA_LIST */ +#endif /* not _VA_LIST_DEFINED */ +#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__)) +#define _VA_LIST_ +#endif +#ifndef _VA_LIST +#define _VA_LIST +#endif +#ifndef _VA_LIST_DEFINED +#define _VA_LIST_DEFINED +#endif +#ifndef _VA_LIST_T_H +#define _VA_LIST_T_H +#endif +#ifndef __va_list__ +#define __va_list__ +#endif + +#endif /* not _VA_LIST_, except on certain systems */ + +#endif /* not __svr4__ */ + +#endif /* _STDARG_H */ + +#endif /* not _ANSI_STDARG_H_ */ +#endif /* not _STDARG_H */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/stdbool.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/stdbool.h new file mode 100644 index 0000000..b36e650 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/stdbool.h @@ -0,0 +1,53 @@ +/* Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 7.16 Boolean type and values + */ + +#ifndef _STDBOOL_H +#define _STDBOOL_H + +#ifndef __cplusplus + +#define bool _Bool +#define true 1 +#define false 0 + +#else /* __cplusplus */ + +/* Supporting in C++ is a GCC extension. */ +#define _Bool bool +#define bool bool +#define false false +#define true true + +#endif /* __cplusplus */ + +/* Signal that all the definitions are present. */ +#define __bool_true_false_are_defined 1 + +#endif /* stdbool.h */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/stddef.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/stddef.h new file mode 100644 index 0000000..7e61795 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/stddef.h @@ -0,0 +1,419 @@ +/* Copyright (C) 1989, 1997, 1998, 1999, 2000, 2002, 2004 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 7.17 Common definitions + */ +#if (!defined(_STDDEF_H) && !defined(_STDDEF_H_) && !defined(_ANSI_STDDEF_H) \ + && !defined(__STDDEF_H__)) \ + || defined(__need_wchar_t) || defined(__need_size_t) \ + || defined(__need_ptrdiff_t) || defined(__need_NULL) \ + || defined(__need_wint_t) + +/* Any one of these symbols __need_* means that GNU libc + wants us just to define one data type. So don't define + the symbols that indicate this file's entire job has been done. */ +#if (!defined(__need_wchar_t) && !defined(__need_size_t) \ + && !defined(__need_ptrdiff_t) && !defined(__need_NULL) \ + && !defined(__need_wint_t)) +#define _STDDEF_H +#define _STDDEF_H_ +/* snaroff@next.com says the NeXT needs this. */ +#define _ANSI_STDDEF_H +/* Irix 5.1 needs this. */ +#define __STDDEF_H__ +#endif + +#ifndef __sys_stdtypes_h +/* This avoids lossage on SunOS but only if stdtypes.h comes first. + There's no way to win with the other order! Sun lossage. 
*/ + +/* On 4.3bsd-net2, make sure ansi.h is included, so we have + one less case to deal with in the following. */ +#if defined (__BSD_NET2__) || defined (____386BSD____) || (defined (__FreeBSD__) && (__FreeBSD__ < 5)) || defined(__NetBSD__) +#include +#endif +/* On FreeBSD 5, machine/ansi.h does not exist anymore... */ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +#include +#endif + +/* In 4.3bsd-net2, machine/ansi.h defines these symbols, which are + defined if the corresponding type is *not* defined. + FreeBSD-2.1 defines _MACHINE_ANSI_H_ instead of _ANSI_H_ */ +#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) +#if !defined(_SIZE_T_) && !defined(_BSD_SIZE_T_) +#define _SIZE_T +#endif +#if !defined(_PTRDIFF_T_) && !defined(_BSD_PTRDIFF_T_) +#define _PTRDIFF_T +#endif +/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_. */ +#if !defined(_WCHAR_T_) && !defined(_BSD_WCHAR_T_) +#ifndef _BSD_WCHAR_T_ +#define _WCHAR_T +#endif +#endif +/* Undef _FOO_T_ if we are supposed to define foo_t. */ +#if defined (__need_ptrdiff_t) || defined (_STDDEF_H_) +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#if defined (__need_size_t) || defined (_STDDEF_H_) +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#if defined (__need_wchar_t) || defined (_STDDEF_H_) +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +#endif /* defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) */ + +/* Sequent's header files use _PTRDIFF_T_ in some conflicting way. + Just ignore it. */ +#if defined (__sequent__) && defined (_PTRDIFF_T_) +#undef _PTRDIFF_T_ +#endif + +/* On VxWorks, may have defined macros like + _TYPE_size_t which will typedef size_t. fixincludes patched the + vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is + not defined, and so that defining this macro defines _GCC_SIZE_T. + If we find that the macros are still defined at this point, we must + invoke them so that the type is defined as expected. */ +#if defined (_TYPE_ptrdiff_t) && (defined (__need_ptrdiff_t) || defined (_STDDEF_H_)) +_TYPE_ptrdiff_t; +#undef _TYPE_ptrdiff_t +#endif +#if defined (_TYPE_size_t) && (defined (__need_size_t) || defined (_STDDEF_H_)) +_TYPE_size_t; +#undef _TYPE_size_t +#endif +#if defined (_TYPE_wchar_t) && (defined (__need_wchar_t) || defined (_STDDEF_H_)) +_TYPE_wchar_t; +#undef _TYPE_wchar_t +#endif + +/* In case nobody has defined these types, but we aren't running under + GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and + __WCHAR_TYPE__ have reasonable values. This can happen if the + parts of GCC is compiled by an older compiler, that actually + include gstddef.h, such as collect2. */ + +/* Signed type of difference of two pointers. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_ptrdiff_t) +#ifndef _PTRDIFF_T /* in case has defined it. 
*/ +#ifndef _T_PTRDIFF_ +#ifndef _T_PTRDIFF +#ifndef __PTRDIFF_T +#ifndef _PTRDIFF_T_ +#ifndef _BSD_PTRDIFF_T_ +#ifndef ___int_ptrdiff_t_h +#ifndef _GCC_PTRDIFF_T +#define _PTRDIFF_T +#define _T_PTRDIFF_ +#define _T_PTRDIFF +#define __PTRDIFF_T +#define _PTRDIFF_T_ +#define _BSD_PTRDIFF_T_ +#define ___int_ptrdiff_t_h +#define _GCC_PTRDIFF_T +#ifndef __PTRDIFF_TYPE__ +#define __PTRDIFF_TYPE__ long int +#endif +typedef __PTRDIFF_TYPE__ ptrdiff_t; +#endif /* _GCC_PTRDIFF_T */ +#endif /* ___int_ptrdiff_t_h */ +#endif /* _BSD_PTRDIFF_T_ */ +#endif /* _PTRDIFF_T_ */ +#endif /* __PTRDIFF_T */ +#endif /* _T_PTRDIFF */ +#endif /* _T_PTRDIFF_ */ +#endif /* _PTRDIFF_T */ + +/* If this symbol has done its job, get rid of it. */ +#undef __need_ptrdiff_t + +#endif /* _STDDEF_H or __need_ptrdiff_t. */ + +/* Unsigned type of `sizeof' something. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_size_t) +#ifndef __size_t__ /* BeOS */ +#ifndef __SIZE_T__ /* Cray Unicos/Mk */ +#ifndef _SIZE_T /* in case has defined it. */ +#ifndef _SYS_SIZE_T_H +#ifndef _T_SIZE_ +#ifndef _T_SIZE +#ifndef __SIZE_T +#ifndef _SIZE_T_ +#ifndef _BSD_SIZE_T_ +#ifndef _SIZE_T_DEFINED_ +#ifndef _SIZE_T_DEFINED +#ifndef _BSD_SIZE_T_DEFINED_ /* Darwin */ +#ifndef _SIZE_T_DECLARED /* FreeBSD 5 */ +#ifndef ___int_size_t_h +#ifndef _GCC_SIZE_T +#ifndef _SIZET_ +#ifndef __size_t +#define __size_t__ /* BeOS */ +#define __SIZE_T__ /* Cray Unicos/Mk */ +#define _SIZE_T +#define _SYS_SIZE_T_H +#define _T_SIZE_ +#define _T_SIZE +#define __SIZE_T +#define _SIZE_T_ +#define _BSD_SIZE_T_ +#define _SIZE_T_DEFINED_ +#define _SIZE_T_DEFINED +#define _BSD_SIZE_T_DEFINED_ /* Darwin */ +#define _SIZE_T_DECLARED /* FreeBSD 5 */ +#define ___int_size_t_h +#define _GCC_SIZE_T +#define _SIZET_ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +/* __size_t is a typedef on FreeBSD 5!, must not trash it. */ +#else +#define __size_t +#endif +#ifndef __SIZE_TYPE__ +#define __SIZE_TYPE__ long unsigned int +#endif +#if !(defined (__GNUG__) && defined (size_t)) +typedef __SIZE_TYPE__ size_t; +#ifdef __BEOS__ +typedef long ssize_t; +#endif /* __BEOS__ */ +#endif /* !(defined (__GNUG__) && defined (size_t)) */ +#endif /* __size_t */ +#endif /* _SIZET_ */ +#endif /* _GCC_SIZE_T */ +#endif /* ___int_size_t_h */ +#endif /* _SIZE_T_DECLARED */ +#endif /* _BSD_SIZE_T_DEFINED_ */ +#endif /* _SIZE_T_DEFINED */ +#endif /* _SIZE_T_DEFINED_ */ +#endif /* _BSD_SIZE_T_ */ +#endif /* _SIZE_T_ */ +#endif /* __SIZE_T */ +#endif /* _T_SIZE */ +#endif /* _T_SIZE_ */ +#endif /* _SYS_SIZE_T_H */ +#endif /* _SIZE_T */ +#endif /* __SIZE_T__ */ +#endif /* __size_t__ */ +#undef __need_size_t +#endif /* _STDDEF_H or __need_size_t. */ + + +/* Wide character type. + Locale-writers should change this as necessary to + be big enough to hold unique values not between 0 and 127, + and not (wchar_t) -1, for each defined multibyte character. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. 
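+
+   For reference, a small sketch of how the types this file provides
+   surface in user code (span_in_bytes is an illustrative name; assumes a
+   translation unit that includes <stddef.h>):
+
+     size_t span_in_bytes (const wchar_t *first, const wchar_t *last)
+     {
+       ptrdiff_t n = last - first;             // difference of two pointers
+       return (size_t) n * sizeof (wchar_t);   // sizeof yields size_t
+     }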
*/ +#if defined (_STDDEF_H) || defined (__need_wchar_t) +#ifndef __wchar_t__ /* BeOS */ +#ifndef __WCHAR_T__ /* Cray Unicos/Mk */ +#ifndef _WCHAR_T +#ifndef _T_WCHAR_ +#ifndef _T_WCHAR +#ifndef __WCHAR_T +#ifndef _WCHAR_T_ +#ifndef _BSD_WCHAR_T_ +#ifndef _BSD_WCHAR_T_DEFINED_ /* Darwin */ +#ifndef _BSD_RUNE_T_DEFINED_ /* Darwin */ +#ifndef _WCHAR_T_DECLARED /* FreeBSD 5 */ +#ifndef _WCHAR_T_DEFINED_ +#ifndef _WCHAR_T_DEFINED +#ifndef _WCHAR_T_H +#ifndef ___int_wchar_t_h +#ifndef __INT_WCHAR_T_H +#ifndef _GCC_WCHAR_T +#define __wchar_t__ /* BeOS */ +#define __WCHAR_T__ /* Cray Unicos/Mk */ +#define _WCHAR_T +#define _T_WCHAR_ +#define _T_WCHAR +#define __WCHAR_T +#define _WCHAR_T_ +#define _BSD_WCHAR_T_ +#define _WCHAR_T_DEFINED_ +#define _WCHAR_T_DEFINED +#define _WCHAR_T_H +#define ___int_wchar_t_h +#define __INT_WCHAR_T_H +#define _GCC_WCHAR_T +#define _WCHAR_T_DECLARED + +/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other + symbols in the _FOO_T_ family, stays defined even after its + corresponding type is defined). If we define wchar_t, then we + must undef _WCHAR_T_; for BSD/386 1.1 (and perhaps others), if + we undef _WCHAR_T_, then we must also define rune_t, since + headers like runetype.h assume that if machine/ansi.h is included, + and _BSD_WCHAR_T_ is not defined, then rune_t is available. + machine/ansi.h says, "Note that _WCHAR_T_ and _RUNE_T_ must be of + the same type." */ +#ifdef _BSD_WCHAR_T_ +#undef _BSD_WCHAR_T_ +#ifdef _BSD_RUNE_T_ +#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE) +typedef _BSD_RUNE_T_ rune_t; +#define _BSD_WCHAR_T_DEFINED_ +#define _BSD_RUNE_T_DEFINED_ /* Darwin */ +#if defined (__FreeBSD__) && (__FreeBSD__ < 5) +/* Why is this file so hard to maintain properly? In contrast to + the comment above regarding BSD/386 1.1, on FreeBSD for as long + as the symbol has existed, _BSD_RUNE_T_ must not stay defined or + redundant typedefs will occur when stdlib.h is included after this file. */ +#undef _BSD_RUNE_T_ +#endif +#endif +#endif +#endif +/* FreeBSD 5 can't be handled well using "traditional" logic above + since it no longer defines _BSD_RUNE_T_ yet still desires to export + rune_t in some cases... */ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE) +#if __BSD_VISIBLE +#ifndef _RUNE_T_DECLARED +typedef __rune_t rune_t; +#define _RUNE_T_DECLARED +#endif +#endif +#endif +#endif + +#ifndef __WCHAR_TYPE__ +#define __WCHAR_TYPE__ int +#endif +#ifndef __cplusplus +typedef __WCHAR_TYPE__ wchar_t; +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif /* _WCHAR_T_DECLARED */ +#endif /* _BSD_RUNE_T_DEFINED_ */ +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif /* __WCHAR_T__ */ +#endif /* __wchar_t__ */ +#undef __need_wchar_t +#endif /* _STDDEF_H or __need_wchar_t. */ + +#if defined (__need_wint_t) +#ifndef _WINT_T +#define _WINT_T + +#ifndef __WINT_TYPE__ +#define __WINT_TYPE__ unsigned int +#endif +typedef __WINT_TYPE__ wint_t; +#endif +#undef __need_wint_t +#endif + +/* In 4.3bsd-net2, leave these undefined to indicate that size_t, etc. + are already defined. */ +/* BSD/OS 3.1 and FreeBSD [23].x require the MACHINE_ANSI_H check here. */ +#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) +/* The references to _GCC_PTRDIFF_T_, _GCC_SIZE_T_, and _GCC_WCHAR_T_ + are probably typos and should be removed before 2.8 is released. 
*/ +#ifdef _GCC_PTRDIFF_T_ +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#ifdef _GCC_SIZE_T_ +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#ifdef _GCC_WCHAR_T_ +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +/* The following ones are the real ones. */ +#ifdef _GCC_PTRDIFF_T +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#ifdef _GCC_SIZE_T +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#ifdef _GCC_WCHAR_T +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +#endif /* _ANSI_H_ || _MACHINE_ANSI_H_ */ + +#endif /* __sys_stdtypes_h */ + +/* A null pointer constant. */ + +#if defined (_STDDEF_H) || defined (__need_NULL) +#undef NULL /* in case has defined it. */ +#ifdef __GNUG__ +#define NULL __null +#else /* G++ */ +#ifndef __cplusplus +#define NULL ((void *)0) +#else /* C++ */ +#define NULL 0 +#endif /* C++ */ +#endif /* G++ */ +#endif /* NULL not defined and or need NULL. */ +#undef __need_NULL + +#ifdef _STDDEF_H + +/* Offset of member MEMBER in a struct of type TYPE. */ +#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER) + +#endif /* _STDDEF_H was defined this time */ + +#endif /* !_STDDEF_H && !_STDDEF_H_ && !_ANSI_STDDEF_H && !__STDDEF_H__ + || __need_XXX was not defined before */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/syslimits.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/syslimits.h new file mode 100644 index 0000000..a449979 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/syslimits.h @@ -0,0 +1,8 @@ +/* syslimits.h stands for the system's own limits.h file. + If we can use it ok unmodified, then we install this text. + If fixincludes fixes it, then the fixed version is installed + instead of this text. */ + +#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */ +/* APPLE LOCAL begin 4401222 */ +/* APPLE LOCAL end 4401222 */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/tgmath.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/tgmath.h new file mode 100644 index 0000000..0874196 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/tgmath.h @@ -0,0 +1,182 @@ +/* APPLE LOCAL file mainline 2007-06-12 2872232 */ +/* Copyright (C) 2004 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* + * ISO C Standard: 7.22 Type-generic math + */ + +#ifndef _TGMATH_H +#define _TGMATH_H + +#include + +#ifndef __cplusplus +#include + +/* Naming convention: generic macros are defining using + __TGMATH_CPLX*, __TGMATH_REAL*, and __TGMATH_CPLX_ONLY. 
_CPLX + means the generic argument(s) may be real or complex, _REAL means + real only, _CPLX means complex only. If there is no suffix, we are + defining a function of one generic argument. If the suffix is _n + it is a function of n generic arguments. If the suffix is _m_n it + is a function of n arguments, the first m of which are generic. We + only define these macros for values of n and/or m that are needed. */ + +/* The general rules for generic macros are given in 7.22 paragraphs 1 and 2. + If any generic parameter is complex, we use a complex version. Otherwise + we use a real version. If the real part of any generic parameter is long + double, we use the long double version. Otherwise if the real part of any + generic paramter is double or of integer type, we use the double version. + Otherwise we use the float version. */ + +#define __tg_cplx(expr) \ + __builtin_classify_type(expr) == 9 + +#define __tg_ldbl(expr) \ + __builtin_types_compatible_p(__typeof__(expr), long double) + +#define __tg_dbl(expr) \ + (__builtin_types_compatible_p(__typeof__(expr), double) \ + || __builtin_classify_type(expr) == 1) + +#define __tg_choose(x,f,d,l) \ + __builtin_choose_expr(__tg_ldbl(x), l, \ + __builtin_choose_expr(__tg_dbl(x), d, \ + f)) + +#define __tg_choose_2(x,y,f,d,l) \ + __builtin_choose_expr(__tg_ldbl(x) || __tg_ldbl(y), l, \ + __builtin_choose_expr(__tg_dbl(x) || __tg_dbl(y), d, \ + f)) + +#define __tg_choose_3(x,y,z,f,d,l) \ + __builtin_choose_expr(__tg_ldbl(x) || __tg_ldbl(y) || __tg_ldbl(z), l, \ + __builtin_choose_expr(__tg_dbl(x) || __tg_dbl(y) \ + || __tg_dbl(z), d, \ + f)) + +#define __TGMATH_CPLX(z,R,C) \ + __builtin_choose_expr (__tg_cplx(z), \ + __tg_choose (__real__(z), C##f(z), (C)(z), C##l(z)), \ + /* APPLE LOCAL shorten-64-to-32 4604239 */ \ + __tg_choose (z, R##f((float)(z)), (R)(z), R##l(z))) + +#define __TGMATH_CPLX_2(z1,z2,R,C) \ + __builtin_choose_expr (__tg_cplx(z1) || __tg_cplx(z2), \ + __tg_choose_2 (__real__(z1), __real__(z2), \ + C##f(z1,z2), (C)(z1,z2), C##l(z1,z2)), \ + __tg_choose_2 (z1, z2, \ + /* APPLE LOCAL shorten-64-to-32 5909621 */ \ + R##f((float)(z1),(float)(z2)), (R)(z1,z2), R##l(z1,z2))) + +#define __TGMATH_REAL(x,R) \ + /* APPLE LOCAL shorten-64-to-32 5909621 */ \ + __tg_choose (x, R##f((float)(x)), (R)(x), R##l(x)) +#define __TGMATH_REAL_2(x,y,R) \ + /* APPLE LOCAL shorten-64-to-32 4604239 */ \ + __tg_choose_2 (x, y, R##f((float)(x),(float)(y)), (R)(x,y), R##l(x,y)) +#define __TGMATH_REAL_3(x,y,z,R) \ + /* APPLE LOCAL shorten-64-to-32 5909621 */ \ + __tg_choose_3 (x, y, z, R##f((float)(x),(float)(y),(float)(z)), (R)(x,y,z), R##l(x,y,z)) +#define __TGMATH_REAL_1_2(x,y,R) \ + /* APPLE LOCAL shorten-64-to-32 5909621 */ \ + __tg_choose (x, R##f((float)(x),y), (R)(x,y), R##l(x,y)) +#define __TGMATH_REAL_2_3(x,y,z,R) \ + /* APPLE LOCAL shorten-64-to-32 5909621 */ \ + __tg_choose_2 (x, y, R##f((float)(x),(float)(y),z), (R)(x,y,z), R##l(x,y,z)) +#define __TGMATH_CPLX_ONLY(z,C) \ + __tg_choose (__real__(z), C##f(z), (C)(z), C##l(z)) + +/* Functions defined in both and (7.22p4) */ +#define acos(z) __TGMATH_CPLX(z, acos, cacos) +#define asin(z) __TGMATH_CPLX(z, asin, casin) +#define atan(z) __TGMATH_CPLX(z, atan, catan) +#define acosh(z) __TGMATH_CPLX(z, acosh, cacosh) +#define asinh(z) __TGMATH_CPLX(z, asinh, casinh) +#define atanh(z) __TGMATH_CPLX(z, atanh, catanh) +#define cos(z) __TGMATH_CPLX(z, cos, ccos) +#define sin(z) __TGMATH_CPLX(z, sin, csin) +#define tan(z) __TGMATH_CPLX(z, tan, ctan) +#define cosh(z) __TGMATH_CPLX(z, cosh, ccosh) +#define 
sinh(z) __TGMATH_CPLX(z, sinh, csinh) +#define tanh(z) __TGMATH_CPLX(z, tanh, ctanh) +#define exp(z) __TGMATH_CPLX(z, exp, cexp) +#define log(z) __TGMATH_CPLX(z, log, clog) +#define pow(z1,z2) __TGMATH_CPLX_2(z1, z2, pow, cpow) +#define sqrt(z) __TGMATH_CPLX(z, sqrt, csqrt) +#define fabs(z) __TGMATH_CPLX(z, fabs, cabs) + +/* Functions defined in only (7.22p5) */ +#define atan2(x,y) __TGMATH_REAL_2(x, y, atan2) +#define cbrt(x) __TGMATH_REAL(x, cbrt) +#define ceil(x) __TGMATH_REAL(x, ceil) +#define copysign(x,y) __TGMATH_REAL_2(x, y, copysign) +#define erf(x) __TGMATH_REAL(x, erf) +#define erfc(x) __TGMATH_REAL(x, erfc) +#define exp2(x) __TGMATH_REAL(x, exp2) +#define expm1(x) __TGMATH_REAL(x, expm1) +#define fdim(x,y) __TGMATH_REAL_2(x, y, fdim) +#define floor(x) __TGMATH_REAL(x, floor) +#define fma(x,y,z) __TGMATH_REAL_3(x, y, z, fma) +#define fmax(x,y) __TGMATH_REAL_2(x, y, fmax) +#define fmin(x,y) __TGMATH_REAL_2(x, y, fmin) +#define fmod(x,y) __TGMATH_REAL_2(x, y, fmod) +#define frexp(x,y) __TGMATH_REAL_1_2(x, y, frexp) +#define hypot(x,y) __TGMATH_REAL_2(x, y, hypot) +#define ilogb(x) __TGMATH_REAL(x, ilogb) +#define ldexp(x,y) __TGMATH_REAL_1_2(x, y, ldexp) +#define lgamma(x) __TGMATH_REAL(x, lgamma) +#define llrint(x) __TGMATH_REAL(x, llrint) +#define llround(x) __TGMATH_REAL(x, llround) +#define log10(x) __TGMATH_REAL(x, log10) +#define log1p(x) __TGMATH_REAL(x, log1p) +#define log2(x) __TGMATH_REAL(x, log2) +#define logb(x) __TGMATH_REAL(x, logb) +#define lrint(x) __TGMATH_REAL(x, lrint) +#define lround(x) __TGMATH_REAL(x, lround) +#define nearbyint(x) __TGMATH_REAL(x, nearbyint) +#define nextafter(x,y) __TGMATH_REAL_2(x, y, nextafter) +#define nexttoward(x,y) __TGMATH_REAL_1_2(x, y, nexttoward) +#define remainder(x,y) __TGMATH_REAL_2(x, y, remainder) +#define remquo(x,y,z) __TGMATH_REAL_2_3(x, y, z, remquo) +#define rint(x) __TGMATH_REAL(x, rint) +#define round(x) __TGMATH_REAL(x, round) +#define scalbn(x,y) __TGMATH_REAL_1_2(x, y, scalbn) +#define scalbln(x,y) __TGMATH_REAL_1_2(x, y, scalbln) +#define tgamma(x) __TGMATH_REAL(x, tgamma) +#define trunc(x) __TGMATH_REAL(x, trunc) + +/* Functions defined in only (7.22p6) */ +#define carg(z) __TGMATH_CPLX_ONLY(z, carg) +#define cimag(z) __TGMATH_CPLX_ONLY(z, cimag) +#define conj(z) __TGMATH_CPLX_ONLY(z, conj) +#define cproj(z) __TGMATH_CPLX_ONLY(z, cproj) +#define creal(z) __TGMATH_CPLX_ONLY(z, creal) + +#endif /* __cplusplus */ +#endif /* _TGMATH_H */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/tmmintrin.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/tmmintrin.h new file mode 100644 index 0000000..1bb254b --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/tmmintrin.h @@ -0,0 +1,304 @@ +/* APPLE LOCAL file ssse3 4424835 */ +/* Copyright (C) 2006 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. 
*/ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.1. */ + +#ifndef _TMMINTRIN_H_INCLUDED +#define _TMMINTRIN_H_INCLUDED + +#ifdef __SSSE3__ +#include + +/* APPLE LOCAL begin nodebug inline */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddd128 ((__v4si)__X, (__v4si)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadds_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadd_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddd ((__v2si)__X, (__v2si)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hadds_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddsw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubd128 ((__v4si)__X, (__v4si)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsubs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 
__attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsub_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubd ((__v2si)__X, (__v2si)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_hsubs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubsw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_maddubs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaddubsw128 ((__v16qi)__X, (__v16qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_maddubs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pmaddubsw ((__v8qi)__X, (__v8qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhrs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmulhrsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhrs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pmulhrsw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_shuffle_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pshufb128 ((__v16qi)__X, (__v16qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_shuffle_pi8 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pshufb ((__v8qi)__X, (__v8qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignb128 ((__v16qi)__X, (__v16qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignw128 ((__v8hi)__X, (__v8hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignd128 ((__v4si)__X, (__v4si)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_pi8 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignb ((__v8qi)__X, (__v8qi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignw ((__v4hi)__X, (__v4hi)__Y); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sign_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignd ((__v2si)__X, (__v2si)__Y); +} + +/* APPLE LOCAL begin 5814283 */ +#define 
_mm_alignr_epi8(__X, __Y, __N) \ + ((__m128i)__builtin_ia32_palignr128 ((__v2di)(__X), (__v2di)(__Y), (__N) * 8)) +/* APPLE LOCAL end 5814283 */ + +#define _mm_alignr_pi8(__X, __Y, __N) \ + ((__m64)__builtin_ia32_palignr ((long long) (__X), (long long) (__Y), (__N) * 8)) + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_epi8 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsb128 ((__v16qi)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsw128 ((__v8hi)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128i __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsd128 ((__v4si)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_pi8 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsb ((__v8qi)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_pi16 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsw ((__v4hi)__X); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_abs_pi32 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsd ((__v2si)__X); +} + +/* APPLE LOCAL begin nodebug inline */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline */ + +#endif /* __SSSE3__ */ + +#endif /* _TMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/unwind.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/unwind.h new file mode 100644 index 0000000..3f4c065 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/unwind.h @@ -0,0 +1,241 @@ +/* Exception handling and frame unwind runtime interface routines. + Copyright (C) 2001, 2003, 2004, 2006 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* This is derived from the C++ ABI for IA-64. Where we diverge + for cross-architecture compatibility are noted with "@@@". 
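+
+   As an illustrative sketch only (dump_backtrace and trace_one are
+   hypothetical names; assumes a translation unit that includes
+   <unwind.h> and <stdio.h>), the stack-walking entry point declared
+   later in this file is typically driven like this:
+
+     static _Unwind_Reason_Code
+     trace_one (struct _Unwind_Context *ctx, void *arg)
+     {
+       (void) arg;
+       printf ("ip: 0x%lx\n", (unsigned long) _Unwind_GetIP (ctx));
+       return _URC_NO_REASON;                 // keep walking
+     }
+
+     void dump_backtrace (void)
+     {
+       _Unwind_Backtrace (trace_one, 0);
+     }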
*/ + +#ifndef _UNWIND_H +#define _UNWIND_H + +#ifndef HIDE_EXPORTS +#pragma GCC visibility push(default) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Level 1: Base ABI */ + +/* @@@ The IA-64 ABI uses uint64 throughout. Most places this is + inefficient for 32-bit and smaller machines. */ +typedef unsigned _Unwind_Word __attribute__((__mode__(__word__))); +typedef signed _Unwind_Sword __attribute__((__mode__(__word__))); +#if defined(__ia64__) && defined(__hpux__) +typedef unsigned _Unwind_Ptr __attribute__((__mode__(__word__))); +#else +typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__))); +#endif +typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__))); + +/* @@@ The IA-64 ABI uses a 64-bit word to identify the producer and + consumer of an exception. We'll go along with this for now even on + 32-bit machines. We'll need to provide some other option for + 16-bit machines and for machines with > 8 bits per byte. */ +typedef unsigned _Unwind_Exception_Class __attribute__((__mode__(__DI__))); + +/* The unwind interface uses reason codes in several contexts to + identify the reasons for failures or other actions. */ +typedef enum +{ + _URC_NO_REASON = 0, + _URC_FOREIGN_EXCEPTION_CAUGHT = 1, + _URC_FATAL_PHASE2_ERROR = 2, + _URC_FATAL_PHASE1_ERROR = 3, + _URC_NORMAL_STOP = 4, + _URC_END_OF_STACK = 5, + _URC_HANDLER_FOUND = 6, + _URC_INSTALL_CONTEXT = 7, + _URC_CONTINUE_UNWIND = 8 +} _Unwind_Reason_Code; + + +/* The unwind interface uses a pointer to an exception header object + as its representation of an exception being thrown. In general, the + full representation of an exception object is language- and + implementation-specific, but it will be prefixed by a header + understood by the unwind interface. */ + +struct _Unwind_Exception; + +typedef void (*_Unwind_Exception_Cleanup_Fn) (_Unwind_Reason_Code, + struct _Unwind_Exception *); + +struct _Unwind_Exception +{ + _Unwind_Exception_Class exception_class; + _Unwind_Exception_Cleanup_Fn exception_cleanup; + _Unwind_Word private_1; + _Unwind_Word private_2; + + /* @@@ The IA-64 ABI says that this structure must be double-word aligned. + Taking that literally does not make much sense generically. Instead we + provide the maximum alignment required by any type for the machine. */ +} __attribute__((__aligned__)); + + +/* The ACTIONS argument to the personality routine is a bitwise OR of one + or more of the following constants. */ +typedef int _Unwind_Action; + +#define _UA_SEARCH_PHASE 1 +#define _UA_CLEANUP_PHASE 2 +#define _UA_HANDLER_FRAME 4 +#define _UA_FORCE_UNWIND 8 +#define _UA_END_OF_STACK 16 + +/* This is an opaque type used to refer to a system-specific data + structure used by the system unwinder. This context is created and + destroyed by the system, and passed to the personality routine + during unwinding. */ +struct _Unwind_Context; + +/* Raise an exception, passing along the given exception object. */ +extern _Unwind_Reason_Code _Unwind_RaiseException (struct _Unwind_Exception *); + +/* Raise an exception for forced unwinding. */ + +typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn) + (int, _Unwind_Action, _Unwind_Exception_Class, + struct _Unwind_Exception *, struct _Unwind_Context *, void *); + +extern _Unwind_Reason_Code _Unwind_ForcedUnwind (struct _Unwind_Exception *, + _Unwind_Stop_Fn, + void *); + +/* Helper to invoke the exception_cleanup routine. */ +extern void _Unwind_DeleteException (struct _Unwind_Exception *); + +/* Resume propagation of an existing exception. 
This is used after + e.g. executing cleanup code, and not to implement rethrowing. */ +extern void _Unwind_Resume (struct _Unwind_Exception *); + +/* @@@ Resume propagation of an FORCE_UNWIND exception, or to rethrow + a normal exception that was handled. */ +extern _Unwind_Reason_Code _Unwind_Resume_or_Rethrow (struct _Unwind_Exception *); + +/* @@@ Use unwind data to perform a stack backtrace. The trace callback + is called for every stack frame in the call chain, but no cleanup + actions are performed. */ +typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) + (struct _Unwind_Context *, void *); + +extern _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn, void *); + +/* These functions are used for communicating information about the unwind + context (i.e. the unwind descriptors and the user register state) between + the unwind library and the personality routine and landing pad. Only + selected registers maybe manipulated. */ + +extern _Unwind_Word _Unwind_GetGR (struct _Unwind_Context *, int); +extern void _Unwind_SetGR (struct _Unwind_Context *, int, _Unwind_Word); + +extern _Unwind_Ptr _Unwind_GetIP (struct _Unwind_Context *); +extern _Unwind_Ptr _Unwind_GetIPInfo (struct _Unwind_Context *, int *); +extern void _Unwind_SetIP (struct _Unwind_Context *, _Unwind_Ptr); + +/* @@@ Retrieve the CFA of the given context. */ +extern _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *); + +extern void *_Unwind_GetLanguageSpecificData (struct _Unwind_Context *); + +extern _Unwind_Ptr _Unwind_GetRegionStart (struct _Unwind_Context *); + + +/* The personality routine is the function in the C++ (or other language) + runtime library which serves as an interface between the system unwind + library and language-specific exception handling semantics. It is + specific to the code fragment described by an unwind info block, and + it is always referenced via the pointer in the unwind info block, and + hence it has no ABI-specified name. + + Note that this implies that two different C++ implementations can + use different names, and have different contents in the language + specific data area. Moreover, that the language specific data + area contains no version info because name of the function invoked + provides more effective versioning by detecting at link time the + lack of code to handle the different data format. */ + +typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn) + (int, _Unwind_Action, _Unwind_Exception_Class, + struct _Unwind_Exception *, struct _Unwind_Context *); + +/* @@@ The following alternate entry points are for setjmp/longjmp + based unwinding. */ + +struct SjLj_Function_Context; +extern void _Unwind_SjLj_Register (struct SjLj_Function_Context *); +extern void _Unwind_SjLj_Unregister (struct SjLj_Function_Context *); + +extern _Unwind_Reason_Code _Unwind_SjLj_RaiseException + (struct _Unwind_Exception *); +extern _Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind + (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *); +extern void _Unwind_SjLj_Resume (struct _Unwind_Exception *); +extern _Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow (struct _Unwind_Exception *); + +/* @@@ The following provide access to the base addresses for text + and data-relative addressing in the LDSA. In order to stay link + compatible with the standard ABI for IA-64, we inline these. */ + +#ifdef __ia64__ +#include + +static inline _Unwind_Ptr +_Unwind_GetDataRelBase (struct _Unwind_Context *_C) +{ + /* The GP is stored in R1. 
*/ + return _Unwind_GetGR (_C, 1); +} + +static inline _Unwind_Ptr +_Unwind_GetTextRelBase (struct _Unwind_Context *_C __attribute__ ((__unused__))) +{ + abort (); + return 0; +} + +/* @@@ Retrieve the Backing Store Pointer of the given context. */ +extern _Unwind_Word _Unwind_GetBSP (struct _Unwind_Context *); +#else +extern _Unwind_Ptr _Unwind_GetDataRelBase (struct _Unwind_Context *); +extern _Unwind_Ptr _Unwind_GetTextRelBase (struct _Unwind_Context *); +#endif + +/* @@@ Given an address, return the entry point of the function that + contains it. */ +extern void * _Unwind_FindEnclosingFunction (void *pc); + +#ifdef __cplusplus +} +#endif + +#ifndef HIDE_EXPORTS +#pragma GCC visibility pop +#endif + +#endif /* unwind.h */ diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/varargs.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/varargs.h new file mode 100644 index 0000000..4b9803e --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/varargs.h @@ -0,0 +1,7 @@ +#ifndef _VARARGS_H +#define _VARARGS_H + +#error "GCC no longer implements ." +#error "Revise your code to use ." + +#endif diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/include/xmmintrin.h b/lib/gcc/i686-apple-darwin11/4.2.1/include/xmmintrin.h new file mode 100644 index 0000000..ad805b8 --- /dev/null +++ b/lib/gcc/i686-apple-darwin11/4.2.1/include/xmmintrin.h @@ -0,0 +1,1582 @@ +/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */ +/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to + the Free Software Foundation, 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* As a special exception, if you include this header file into source + files compiled by GCC, this header file does not by itself cause + the resulting executable to be covered by the GNU General Public + License. This exception does not however invalidate any other + reasons why the executable file might be covered by the GNU General + Public License. */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _XMMINTRIN_H_INCLUDED +#define _XMMINTRIN_H_INCLUDED + +#ifndef __SSE__ +# error "SSE instruction set not enabled" +#else + +/* We need type definitions from the MMX header file. */ +#include + +/* Get _mm_malloc () and _mm_free (). */ +/* APPLE LOCAL begin xmmintrin.h for kernel 4123064 */ +#if __STDC_HOSTED__ +#include +#endif +/* APPLE LOCAL end xmmintrin.h for kernel 4123064 */ + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); + +/* Internal data types for implementing the intrinsics. */ +typedef float __v4sf __attribute__ ((__vector_size__ (16))); + +/* Create a selector for use with the SHUFPS instruction. 
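+   As a rough usage sketch (the names __a, __b and __r are invented here
+   and assumed to be __m128 values):
+     __m128 __r = _mm_shuffle_ps (__a, __b, _MM_SHUFFLE (3, 2, 1, 0));
+   Each macro argument is a 2-bit element index, with the last argument
+   selecting element 0 of the result.  _MM_SHUFFLE (3, 2, 1, 0) evaluates
+   to 0xE4, so __r takes elements 0 and 1 from __a and elements 2 and 3
+   from __b.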
*/ +#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \ + (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0)) + +/* Constants for use with _mm_prefetch. */ +enum _mm_hint +{ + _MM_HINT_T0 = 3, + _MM_HINT_T1 = 2, + _MM_HINT_T2 = 1, + _MM_HINT_NTA = 0 +}; + +/* Bits in the MXCSR. */ +#define _MM_EXCEPT_MASK 0x003f +#define _MM_EXCEPT_INVALID 0x0001 +#define _MM_EXCEPT_DENORM 0x0002 +#define _MM_EXCEPT_DIV_ZERO 0x0004 +#define _MM_EXCEPT_OVERFLOW 0x0008 +#define _MM_EXCEPT_UNDERFLOW 0x0010 +#define _MM_EXCEPT_INEXACT 0x0020 + +#define _MM_MASK_MASK 0x1f80 +#define _MM_MASK_INVALID 0x0080 +#define _MM_MASK_DENORM 0x0100 +#define _MM_MASK_DIV_ZERO 0x0200 +#define _MM_MASK_OVERFLOW 0x0400 +#define _MM_MASK_UNDERFLOW 0x0800 +#define _MM_MASK_INEXACT 0x1000 + +#define _MM_ROUND_MASK 0x6000 +#define _MM_ROUND_NEAREST 0x0000 +#define _MM_ROUND_DOWN 0x2000 +#define _MM_ROUND_UP 0x4000 +#define _MM_ROUND_TOWARD_ZERO 0x6000 + +#define _MM_FLUSH_ZERO_MASK 0x8000 +#define _MM_FLUSH_ZERO_ON 0x8000 +#define _MM_FLUSH_ZERO_OFF 0x0000 + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#define __always_inline__ __always_inline__, __nodebug__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* APPLE LOCAL begin radar 5618945 */ +#undef __STATIC_INLINE +#ifdef __GNUC_STDC_INLINE__ +#define __STATIC_INLINE __inline +#else +#define __STATIC_INLINE static __inline +#endif +/* APPLE LOCAL end radar 5618945 */ + +/* Create a vector of zeros. */ +/* APPLE LOCAL begin radar 4152603 */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setzero_ps (void) +{ + return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f }; +} + +/* Perform the respective operation on the lower SPFP (single-precision + floating-point) values of A and B; the upper three SPFP values are + passed through from A. 
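+   A minimal sketch, assuming __a and __b are __m128 values (the names
+   are only illustrative):
+     __m128 __r = _mm_add_ss (__a, __b);
+   sets element 0 of __r to the sum of the two low elements, while
+   elements 1 through 3 of __r are copied from __a.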
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_div_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sqrt_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_rcp_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_rcpss ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_rsqrt_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform the respective operation on the four SPFP values in A and B. 
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_add_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sub_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mul_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_div_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sqrt_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_rcp_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rcpps ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_rsqrt_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform logical bit-wise operations on 128-bit values. */ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_and_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_andps (__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_andnot_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_andnps (__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_or_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_orps (__A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_xor_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_xorps (__A, __B); +} + +/* Perform a comparison on the lower SPFP values of A and B. If the + comparison is true, place a mask of all ones in the result, otherwise a + mask of zeros. The upper three SPFP values are passed through from A. 
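+   An informal sketch, with invented __m128 values __a and __b:
+     __m128 __m = _mm_cmplt_ss (__a, __b);
+     int __lt = _mm_movemask_ps (__m) & 1;
+   Here __lt is 1 exactly when the low element of __a is less than the
+   low element of __b; elements 1 through 3 of __m come from __a.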
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmple_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpltss ((__v4sf) __B, + (__v4sf) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpge_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpless ((__v4sf) __B, + (__v4sf) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpneq_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnlt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnle_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpngt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpnltss ((__v4sf) __B, + (__v4sf) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnge_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpnless ((__v4sf) __B, + (__v4sf) + __A)); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpord_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpunord_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform a comparison on the four SPFP values of A and B. For each + element, if the comparison is true, place a mask of all ones in the + result, otherwise a mask of zeros. 
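+   The resulting masks are typically combined with the bit-wise
+   operations above; a minimal sketch (names invented, all values __m128):
+     __m128 __mask = _mm_cmplt_ps (__a, __b);
+     __m128 __r = _mm_or_ps (_mm_and_ps (__mask, __x),
+                             _mm_andnot_ps (__mask, __y));
+   selects each element of __r from __x where __a is less than __b and
+   from __y elsewhere, without any branch.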
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpeq_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmplt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmple_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpgt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpge_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpneq_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnlt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnle_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpngt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpnge_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpord_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cmpunord_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B); +} + +/* Compare the lower SPFP values of A and B and return 1 if true + and 0 if false. 
*/ + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comieq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comilt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comile_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comigt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comige_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_comineq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomieq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomilt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomile_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomigt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomige_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_ucomineq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B); +} + +/* Convert the lower SPFP value to a 32-bit integer according to the current + rounding mode. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtss_si32 (__m128 __A) +{ + return __builtin_ia32_cvtss2si ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvt_ss2si (__m128 __A) +{ + return _mm_cvtss_si32 (__A); +} + +#ifdef __x86_64__ +/* Convert the lower SPFP value to a 32-bit integer according to the + current rounding mode. */ + +/* Intel intrinsic. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtss_si64 (__m128 __A) +{ + return __builtin_ia32_cvtss2si64 ((__v4sf) __A); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtss_si64x (__m128 __A) +{ + return __builtin_ia32_cvtss2si64 ((__v4sf) __A); +} +#endif + +/* Convert the two lower SPFP values to 32-bit integers according to the + current rounding mode. Return the integers in packed form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtps_pi32 (__m128 __A) +{ + return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvt_ps2pi (__m128 __A) +{ + return _mm_cvtps_pi32 (__A); +} + +/* Truncate the lower SPFP value to a 32-bit integer. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttss_si32 (__m128 __A) +{ + return __builtin_ia32_cvttss2si ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtt_ss2si (__m128 __A) +{ + return _mm_cvttss_si32 (__A); +} + +#ifdef __x86_64__ +/* Truncate the lower SPFP value to a 32-bit integer. */ + +/* Intel intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttss_si64 (__m128 __A) +{ + return __builtin_ia32_cvttss2si64 ((__v4sf) __A); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE long long __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttss_si64x (__m128 __A) +{ + return __builtin_ia32_cvttss2si64 ((__v4sf) __A); +} +#endif + +/* Truncate the two lower SPFP values to 32-bit integers. Return the + integers in packed form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvttps_pi32 (__m128 __A) +{ + return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtt_ps2pi (__m128 __A) +{ + return _mm_cvttps_pi32 (__A); +} + +/* Convert B to a SPFP value and insert it as element zero in A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi32_ss (__m128 __A, int __B) +{ + return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvt_si2ss (__m128 __A, int __B) +{ + return _mm_cvtsi32_ss (__A, __B); +} + +#ifdef __x86_64__ +/* Convert B to a SPFP value and insert it as element zero in A. */ + +/* Intel intrinsic. 
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64_ss (__m128 __A, long long __B) +{ + return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B); +} + +/* Microsoft intrinsic. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtsi64x_ss (__m128 __A, long long __B) +{ + return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B); +} +#endif + +/* Convert the two 32-bit values in B to SPFP form and insert them + as the two lower elements in A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpi32_ps (__m128 __A, __m64 __B) +{ + return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvt_pi2ps (__m128 __A, __m64 __B) +{ + return _mm_cvtpi32_ps (__A, __B); +} + +/* Convert the four signed 16-bit values in A to SPFP form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpi16_ps (__m64 __A) +{ + __v4hi __sign; + __v2si __hisi, __losi; + __v4sf __r; + + /* This comparison against zero gives us a mask that can be used to + fill in the missing sign bits in the unpack operations below, so + that we get signed values after unpacking. */ + __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A); + + /* Convert the four words to doublewords. */ + __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign); + __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign); + + /* Convert the doublewords to floating point two at a time. */ + __r = (__v4sf) _mm_setzero_ps (); + __r = __builtin_ia32_cvtpi2ps (__r, __hisi); + __r = __builtin_ia32_movlhps (__r, __r); + __r = __builtin_ia32_cvtpi2ps (__r, __losi); + + return (__m128) __r; +} + +/* Convert the four unsigned 16-bit values in A to SPFP form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpu16_ps (__m64 __A) +{ + __v2si __hisi, __losi; + __v4sf __r; + + /* Convert the four words to doublewords. */ + __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL); + __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL); + + /* Convert the doublewords to floating point two at a time. */ + __r = (__v4sf) _mm_setzero_ps (); + __r = __builtin_ia32_cvtpi2ps (__r, __hisi); + __r = __builtin_ia32_movlhps (__r, __r); + __r = __builtin_ia32_cvtpi2ps (__r, __losi); + + return (__m128) __r; +} + +/* Convert the low four signed 8-bit values in A to SPFP form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpi8_ps (__m64 __A) +{ + __v8qi __sign; + + /* This comparison against zero gives us a mask that can be used to + fill in the missing sign bits in the unpack operations below, so + that we get signed values after unpacking. */ + __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A); + + /* Convert the four low bytes to words. 
*/ + __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign); + + return _mm_cvtpi16_ps(__A); +} + +/* Convert the low four unsigned 8-bit values in A to SPFP form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpu8_ps(__m64 __A) +{ + __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL); + return _mm_cvtpu16_ps(__A); +} + +/* Convert the four signed 32-bit values in A and B to SPFP form. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtpi32x2_ps(__m64 __A, __m64 __B) +{ + __v4sf __zero = (__v4sf) _mm_setzero_ps (); + __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A); + __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B); + return (__m128) __builtin_ia32_movlhps (__sfa, __sfb); +} + +/* Convert the four SPFP values in A to four signed 16-bit integers. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtps_pi16(__m128 __A) +{ + __v4sf __hisf = (__v4sf)__A; + __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf); + __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf); + __v2si __losi = __builtin_ia32_cvtps2pi (__losf); + return (__m64) __builtin_ia32_packssdw (__hisi, __losi); +} + +/* Convert the four SPFP values in A to four signed 8-bit integers. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtps_pi8(__m128 __A) +{ + __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A); + return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL); +} + +/* Selects four specific SPFP values from A and B based on MASK. */ +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask) +{ + return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask); +} +#else +#define _mm_shuffle_ps(A, B, MASK) \ + ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK))) +#endif + + +/* Selects and interleaves the upper two SPFP values from A and B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpackhi_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B); +} + +/* Selects and interleaves the lower two SPFP values from A and B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_unpacklo_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B); +} + +/* Sets the upper two SPFP values with 64-bits of data loaded from P; + the lower two values are passed through from A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadh_pi (__m128 __A, __m64 const *__P) +{ + return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P); +} + +/* Stores the upper two SPFP values of A into P. 
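+   A short sketch (the buffer name is invented): given float __buf[2]
+   and an __m128 value __a,
+     _mm_storeh_pi ((__m64 *) __buf, __a);
+   writes element 2 of __a to __buf[0] and element 3 to __buf[1],
+   leaving __a itself unchanged.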
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storeh_pi (__m64 *__P, __m128 __A) +{ + __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A); +} + +/* Moves the upper two values of B into the lower two values of A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movehl_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B); +} + +/* Moves the lower two values of B into the upper two values of A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movelh_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B); +} + +/* Sets the lower two SPFP values with 64-bits of data loaded from P; + the upper two values are passed through from A. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadl_pi (__m128 __A, __m64 const *__P) +{ + return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P); +} + +/* Stores the lower two SPFP values of A into P. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storel_pi (__m64 *__P, __m128 __A) +{ + __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A); +} + +/* Creates a 4-bit mask from the most significant bits of the SPFP values. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movemask_ps (__m128 __A) +{ + return __builtin_ia32_movmskps ((__v4sf)__A); +} + +/* Return the contents of the control register. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_getcsr (void) +{ + return __builtin_ia32_stmxcsr (); +} + +/* Read exception bits from the control register. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_GET_EXCEPTION_STATE (void) +{ + return _mm_getcsr() & _MM_EXCEPT_MASK; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_GET_EXCEPTION_MASK (void) +{ + return _mm_getcsr() & _MM_MASK_MASK; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_GET_ROUNDING_MODE (void) +{ + return _mm_getcsr() & _MM_ROUND_MASK; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE unsigned int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_GET_FLUSH_ZERO_MODE (void) +{ + return _mm_getcsr() & _MM_FLUSH_ZERO_MASK; +} + +/* Set the control register to I. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setcsr (unsigned int __I) +{ + __builtin_ia32_ldmxcsr (__I); +} + +/* Set exception bits in the control register. 
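+   Each of these wrappers reads the MXCSR with _mm_getcsr, clears the
+   corresponding field and ors in the new value via _mm_setcsr.  A rough
+   example:
+     _MM_SET_EXCEPTION_MASK (_MM_GET_EXCEPTION_MASK () | _MM_MASK_DIV_ZERO);
+   additionally masks divide-by-zero exceptions while leaving the other
+   MXCSR fields untouched.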
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_SET_EXCEPTION_STATE(unsigned int __mask) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_SET_EXCEPTION_MASK (unsigned int __mask) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_SET_ROUNDING_MODE (unsigned int __mode) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode); +} + +/* Create a vector with element 0 as F and the rest zero. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_ss (float __F) +{ + return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 }; +} + +/* Create a vector with all four elements equal to F. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set1_ps (float __F) +{ + return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F }; +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_ps1 (float __F) +{ + return _mm_set1_ps (__F); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_ss (float const *__P) +{ + return _mm_set_ss (*__P); +} + +/* Create a vector with all four elements equal to *P. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load1_ps (float const *__P) +{ + return _mm_set1_ps (*__P); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_ps1 (float const *__P) +{ + return _mm_load1_ps (__P); +} + +/* Load four SPFP values from P. The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_load_ps (float const *__P) +{ + return (__m128) *(__v4sf *)__P; +} + +/* Load four SPFP values from P. The address need not be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadu_ps (float const *__P) +{ + return (__m128) __builtin_ia32_loadups (__P); +} + +/* Load four SPFP values in reverse order. The address must be aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_loadr_ps (float const *__P) +{ + __v4sf __tmp = *(__v4sf *)__P; + return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3)); +} + +/* Create the vector [Z Y X W]. 
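+   The arguments are given from the highest element down; as an informal
+   example,
+     __m128 __v = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
+   puts 1.0f in element 0 and 4.0f in element 3, whereas _mm_setr_ps
+   below takes its arguments in memory order, lowest element first.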
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W) +{ + return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z }; +} + +/* Create the vector [W X Y Z]. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_setr_ps (float __Z, float __Y, float __X, float __W) +{ + return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W }; +} + +/* Stores the lower SPFP value. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_ss (float *__P, __m128 __A) +{ + *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE float __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_cvtss_f32 (__m128 __A) +{ + return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0); +} + +/* Store four SPFP values. The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_ps (float *__P, __m128 __A) +{ + *(__v4sf *)__P = (__v4sf)__A; +} + +/* Store four SPFP values. The address need not be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storeu_ps (float *__P, __m128 __A) +{ + __builtin_ia32_storeups (__P, (__v4sf)__A); +} + +/* Store the lower SPFP value across four words. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store1_ps (float *__P, __m128 __A) +{ + __v4sf __va = (__v4sf)__A; + __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0)); + _mm_storeu_ps (__P, __tmp); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_store_ps1 (float *__P, __m128 __A) +{ + _mm_store1_ps (__P, __A); +} + +/* Store four SPFP values in reverse order. The address must be aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_storer_ps (float *__P, __m128 __A) +{ + __v4sf __va = (__v4sf)__A; + __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3)); + _mm_store_ps (__P, __tmp); +} + +/* Sets the low SPFP value of A from the low value of B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m128 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_move_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B); +} + +/* Extracts one of the four words of A. The selector N must be immediate. 
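+   A brief sketch (with an invented __m64 value __v holding four 16-bit
+   words):
+     int __w = _mm_extract_pi16 (__v, 2);
+   yields word 2 of __v as an int; the selector becomes the immediate
+   operand of the underlying pextrw instruction, so it cannot be a
+   run-time value.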
*/ +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_extract_pi16 (__m64 const __A, int const __N) +{ + return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pextrw (__m64 const __A, int const __N) +{ + return _mm_extract_pi16 (__A, __N); +} +#else +#define _mm_extract_pi16(A, N) __builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N)) +#define _m_pextrw(A, N) _mm_extract_pi16((A), (N)) +#endif + +/* Inserts word D into one of four words of A. The selector N must be + immediate. */ +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_insert_pi16 (__m64 const __A, int const __D, int const __N) +{ + return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pinsrw (__m64 const __A, int const __D, int const __N) +{ + return _mm_insert_pi16 (__A, __D, __N); +} +#else +#define _mm_insert_pi16(A, D, N) \ + ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N))) +#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N)) +#endif + +/* Compute the element-wise maximum of signed 16-bit values. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_pi16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmaxsw (__m64 __A, __m64 __B) +{ + return _mm_max_pi16 (__A, __B); +} + +/* Compute the element-wise maximum of unsigned 8-bit values. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_max_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmaxub (__m64 __A, __m64 __B) +{ + return _mm_max_pu8 (__A, __B); +} + +/* Compute the element-wise minimum of signed 16-bit values. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_pi16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pminsw (__m64 __A, __m64 __B) +{ + return _mm_min_pi16 (__A, __B); +} + +/* Compute the element-wise minimum of unsigned 8-bit values. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_min_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pminub (__m64 __A, __m64 __B) +{ + return _mm_min_pu8 (__A, __B); +} + +/* Create an 8-bit mask of the signs of 8-bit values. 
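+   A small sketch (with an invented __m64 value __v):
+     int __signs = _mm_movemask_pi8 (__v);
+   sets bit i of __signs to the most significant bit of byte i of __v,
+   so a result of zero means no byte has its sign bit set.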
*/ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_movemask_pi8 (__m64 __A) +{ + return __builtin_ia32_pmovmskb ((__v8qi)__A); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE int __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmovmskb (__m64 __A) +{ + return _mm_movemask_pi8 (__A); +} + +/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values + in B and produce the high 16 bits of the 32-bit results. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_mulhi_pu16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pmulhuw (__m64 __A, __m64 __B) +{ + return _mm_mulhi_pu16 (__A, __B); +} + +/* Return a combination of the four 16-bit values in A. The selector + must be an immediate. */ +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_shuffle_pi16 (__m64 __A, int __N) +{ + return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pshufw (__m64 __A, int __N) +{ + return _mm_shuffle_pi16 (__A, __N); +} +#else +#define _mm_shuffle_pi16(A, N) \ + ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N))) +#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N)) +#endif + +/* Conditionally store byte elements of A into P. The high bit of each + byte in the selector N determines whether the corresponding byte from + A is stored. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P) +{ + __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_maskmovq (__m64 __A, __m64 __N, char *__P) +{ + _mm_maskmove_si64 (__A, __N, __P); +} + +/* Compute the rounded averages of the unsigned 8-bit values in A and B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_avg_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pavgb (__m64 __A, __m64 __B) +{ + return _mm_avg_pu8 (__A, __B); +} + +/* Compute the rounded averages of the unsigned 16-bit values in A and B. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_avg_pu16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_pavgw (__m64 __A, __m64 __B) +{ + return _mm_avg_pu16 (__A, __B); +} + +/* Compute the sum of the absolute differences of the unsigned 8-bit + values in A and B. 
Return the value in the lower 16-bit word; the + upper words are cleared. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sad_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B); +} + +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE __m64 __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_m_psadbw (__m64 __A, __m64 __B) +{ + return _mm_sad_pu8 (__A, __B); +} + +/* Loads one cache line from address P to a location "closer" to the + processor. The selector I specifies the type of prefetch operation. */ +#if 0 +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_prefetch (void *__P, enum _mm_hint __I) +{ + __builtin_prefetch (__P, 0, __I); +} +#else +#define _mm_prefetch(P, I) \ + __builtin_prefetch ((P), 0, (I)) +#endif + +/* Stores the data in A to the address P without polluting the caches. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_stream_pi (__m64 *__P, __m64 __A) +{ + /* APPLE LOCAL 4656532 use V1DImode for _m64 */ + __builtin_ia32_movntq (__P, __A); +} + +/* Likewise. The address must be 16-byte aligned. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_stream_ps (float *__P, __m128 __A) +{ + __builtin_ia32_movntps (__P, (__v4sf)__A); +} + +/* Guarantees that every preceding store is globally visible before + any subsequent store. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_sfence (void) +{ + __builtin_ia32_sfence (); +} + +/* The execution of the next instruction is delayed by an implementation + specific amount of time. The instruction does not modify the + architectural state. */ +/* APPLE LOCAL begin radar 5618945 */ +__STATIC_INLINE void __attribute__((__always_inline__)) +/* APPLE LOCAL end radar 5618945 */ +_mm_pause (void) +{ + __asm__ __volatile__ ("rep; nop" : : ); +} +/* APPLE LOCAL end radar 4152603 */ + +/* Transpose the 4x4 matrix composed of row[0-3]. */ +#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ +do { \ + __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \ + __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \ + __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \ + __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \ + __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \ + (row0) = __builtin_ia32_movlhps (__t0, __t1); \ + (row1) = __builtin_ia32_movhlps (__t1, __t0); \ + (row2) = __builtin_ia32_movlhps (__t2, __t3); \ + (row3) = __builtin_ia32_movhlps (__t3, __t2); \ +} while (0) + +/* APPLE LOCAL begin nodebug inline 4152603 */ +#undef __always_inline__ +/* APPLE LOCAL end nodebug inline 4152603 */ + +/* For backward source compatibility. 
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/libcc_kext.a b/lib/gcc/i686-apple-darwin11/4.2.1/libcc_kext.a
new file mode 100644
index 0000000..8081585
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/libcc_kext.a differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/libgcc.a b/lib/gcc/i686-apple-darwin11/4.2.1/libgcc.a
new file mode 100644
index 0000000..4cbeb76
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/libgcc.a differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/libgcc_eh.a b/lib/gcc/i686-apple-darwin11/4.2.1/libgcc_eh.a
new file mode 100644
index 0000000..dce33d5
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/libgcc_eh.a differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/libgcc_static.a b/lib/gcc/i686-apple-darwin11/4.2.1/libgcc_static.a
new file mode 100644
index 0000000..2a04859
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/libgcc_static.a differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/libgcov.a b/lib/gcc/i686-apple-darwin11/4.2.1/libgcov.a
new file mode 100644
index 0000000..44c74cf
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/libgcov.a differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/libgomp.a b/lib/gcc/i686-apple-darwin11/4.2.1/libgomp.a
new file mode 100644
index 0000000..cfd30c8
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/libgomp.a differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/libgomp.spec b/lib/gcc/i686-apple-darwin11/4.2.1/libgomp.spec
new file mode 100644
index 0000000..7102255
--- /dev/null
+++ b/lib/gcc/i686-apple-darwin11/4.2.1/libgomp.spec
@@ -0,0 +1,3 @@
+# This spec file is read by gcc when linking.  It is used to specify the
+# standard libraries we need in order to link with -fopenmp.
+*link_gomp: -lgomp %{static: }
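The three-line libgomp.spec just added is what ties -fopenmp to the bundled
libgomp: when the driver is invoked with -fopenmp it expands the *link_gomp
rule from this spec and appends -lgomp at link time.  As a hedged illustration
(not part of the patch; the file name and the exact invocation below are
assumptions), a program such as the following links without naming -lgomp
explicitly.  The identical x86_64 copy of the spec further down is presumably
picked up in the same way for -m64 multilib builds.

/* hello-omp.c -- illustrative only.
   Assumed build command:
   i686-apple-darwin11-gcc -fopenmp -o hello-omp hello-omp.c  */
#include <omp.h>
#include <stdio.h>

int
main (void)
{
#pragma omp parallel
  printf ("hello from thread %d of %d\n",
          omp_get_thread_num (), omp_get_num_threads ());
  return 0;
}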
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/libstdc++.dylib b/lib/gcc/i686-apple-darwin11/4.2.1/libstdc++.dylib
new file mode 100755
index 0000000..84c8372
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/libstdc++.dylib differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/crt3.o b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/crt3.o
new file mode 100644
index 0000000..eb28ab0
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/crt3.o differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcc.a b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcc.a
new file mode 100644
index 0000000..c38d854
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcc.a differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcc_eh.a b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcc_eh.a
new file mode 100644
index 0000000..65a3218
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcc_eh.a differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcov.a b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcov.a
new file mode 100644
index 0000000..4e47360
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgcov.a differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgomp.a b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgomp.a
new file mode 100644
index 0000000..6da8037
Binary files /dev/null and b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgomp.a differ
diff --git a/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgomp.spec b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgomp.spec
new file mode 100644
index 0000000..7102255
--- /dev/null
+++ b/lib/gcc/i686-apple-darwin11/4.2.1/x86_64/libgomp.spec
@@ -0,0 +1,3 @@
+# This spec file is read by gcc when linking.  It is used to specify the
+# standard libraries we need in order to link with -fopenmp.
+*link_gomp: -lgomp %{static: }
diff --git a/libexec/gcc/i686-apple-darwin10/4.2.1/as b/libexec/gcc/i686-apple-darwin10/4.2.1/as
new file mode 120000
index 0000000..159082c
--- /dev/null
+++ b/libexec/gcc/i686-apple-darwin10/4.2.1/as
@@ -0,0 +1 @@
+../../../../bin/as
\ No newline at end of file
diff --git a/libexec/gcc/i686-apple-darwin10/4.2.1/cc1 b/libexec/gcc/i686-apple-darwin10/4.2.1/cc1
new file mode 100755
index 0000000..aa6e319
Binary files /dev/null and b/libexec/gcc/i686-apple-darwin10/4.2.1/cc1 differ
diff --git a/libexec/gcc/i686-apple-darwin10/4.2.1/cc1obj b/libexec/gcc/i686-apple-darwin10/4.2.1/cc1obj
new file mode 100755
index 0000000..4378906
Binary files /dev/null and b/libexec/gcc/i686-apple-darwin10/4.2.1/cc1obj differ
diff --git a/libexec/gcc/i686-apple-darwin10/4.2.1/cc1objplus b/libexec/gcc/i686-apple-darwin10/4.2.1/cc1objplus
new file mode 100755
index 0000000..0b9918f
Binary files /dev/null and b/libexec/gcc/i686-apple-darwin10/4.2.1/cc1objplus differ
diff --git a/libexec/gcc/i686-apple-darwin10/4.2.1/cc1plus b/libexec/gcc/i686-apple-darwin10/4.2.1/cc1plus
new file mode 100755
index 0000000..67f9741
Binary files /dev/null and b/libexec/gcc/i686-apple-darwin10/4.2.1/cc1plus differ
diff --git a/libexec/gcc/i686-apple-darwin10/4.2.1/collect2 b/libexec/gcc/i686-apple-darwin10/4.2.1/collect2
new file mode 100755
index 0000000..4567dc6
Binary files /dev/null and b/libexec/gcc/i686-apple-darwin10/4.2.1/collect2 differ
diff --git a/libexec/gcc/i686-apple-darwin10/4.2.1/ld b/libexec/gcc/i686-apple-darwin10/4.2.1/ld
new file mode 120000
index 0000000..123a2b3
--- /dev/null
+++ b/libexec/gcc/i686-apple-darwin10/4.2.1/ld
@@ -0,0 +1 @@
+../../../../bin/ld
\ No newline at end of file
diff --git a/libexec/gcc/i686-apple-darwin11/4.2.1/as b/libexec/gcc/i686-apple-darwin11/4.2.1/as
new file mode 120000
index 0000000..159082c
--- /dev/null
+++ b/libexec/gcc/i686-apple-darwin11/4.2.1/as
@@ -0,0 +1 @@
+../../../../bin/as
\ No newline at end of file
diff --git a/libexec/gcc/i686-apple-darwin11/4.2.1/cc1 b/libexec/gcc/i686-apple-darwin11/4.2.1/cc1
new file mode 100755
index 0000000..daf70dc
Binary files /dev/null and b/libexec/gcc/i686-apple-darwin11/4.2.1/cc1 differ
diff --git a/libexec/gcc/i686-apple-darwin11/4.2.1/cc1obj b/libexec/gcc/i686-apple-darwin11/4.2.1/cc1obj
new file mode 100755
index 0000000..b0f6a82
Binary files /dev/null and b/libexec/gcc/i686-apple-darwin11/4.2.1/cc1obj differ
diff --git a/libexec/gcc/i686-apple-darwin11/4.2.1/cc1objplus b/libexec/gcc/i686-apple-darwin11/4.2.1/cc1objplus
new file mode 100755
index 0000000..03f7482
Binary files /dev/null and b/libexec/gcc/i686-apple-darwin11/4.2.1/cc1objplus differ
diff --git a/libexec/gcc/i686-apple-darwin11/4.2.1/cc1plus b/libexec/gcc/i686-apple-darwin11/4.2.1/cc1plus
new file mode 100755
index 0000000..26e9de9
Binary files /dev/null and b/libexec/gcc/i686-apple-darwin11/4.2.1/cc1plus differ
diff --git a/libexec/gcc/i686-apple-darwin11/4.2.1/collect2 b/libexec/gcc/i686-apple-darwin11/4.2.1/collect2
new file mode 100755
index 0000000..3567bfb
Binary files /dev/null and b/libexec/gcc/i686-apple-darwin11/4.2.1/collect2 differ
diff --git a/libexec/gcc/i686-apple-darwin11/4.2.1/ld b/libexec/gcc/i686-apple-darwin11/4.2.1/ld
new file mode 120000
index 0000000..123a2b3
--- /dev/null
+++ b/libexec/gcc/i686-apple-darwin11/4.2.1/ld
@@ -0,0 +1 @@
+../../../../bin/ld
\ No newline at end of file
--
cgit v1.2.3
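Appended for illustration only (not part of the patch): a second sketch that
exercises the _MM_TRANSPOSE4_PS macro and the _mm_stream_ps/_mm_sfence pair
from the xmmintrin.h hunk earlier in this patch.  The file name and the -msse
flag are, again, assumptions of this sketch rather than requirements of the
toolchain.

/* transpose.c -- illustrative only.
   Assumed build command:
   i686-apple-darwin10-gcc -msse -o transpose transpose.c  */
#include <xmmintrin.h>
#include <stdio.h>

int
main (void)
{
  /* 16-byte aligned storage, as _mm_load_ps and _mm_stream_ps require.  */
  float in[16] __attribute__ ((aligned (16))) = {
     0.f,  1.f,  2.f,  3.f,
     4.f,  5.f,  6.f,  7.f,
     8.f,  9.f, 10.f, 11.f,
    12.f, 13.f, 14.f, 15.f
  };
  float out[16] __attribute__ ((aligned (16)));

  __m128 r0 = _mm_load_ps (in + 0);
  __m128 r1 = _mm_load_ps (in + 4);
  __m128 r2 = _mm_load_ps (in + 8);
  __m128 r3 = _mm_load_ps (in + 12);

  _MM_TRANSPOSE4_PS (r0, r1, r2, r3);

  /* Write the transposed rows back with non-temporal stores, then make
     them globally visible before they are read again.  */
  _mm_stream_ps (out + 0,  r0);
  _mm_stream_ps (out + 4,  r1);
  _mm_stream_ps (out + 8,  r2);
  _mm_stream_ps (out + 12, r3);
  _mm_sfence ();

  /* Element (0,1) of the transpose is element (1,0) of the input.  */
  printf ("out[1] = %g (in[4] was %g)\n", out[1], in[4]);
  return 0;
}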